From 7b7d80d12576b32bd59f7a54973360998d47ee16 Mon Sep 17 00:00:00 2001
From: Antoine Richard
Date: Tue, 27 Jan 2026 16:22:57 +0100
Subject: [PATCH 01/38] early sign of life. Cartpole and Ant are training.

---
 apps/isaaclab.python.headless.kit | 1 +
 apps/isaaclab.python.kit | 1 +
 scripts/demos/pick_and_place.py | 3 +-
 .../01_assets/run_surface_gripper.py | 3 +-
 source/isaaclab/isaaclab/assets/__init__.py | 28 +-
 .../isaaclab/assets/articulation/__init__.py | 10 +
 .../assets/articulation/articulation.py | 2168 +---------------
 .../assets/articulation/articulation_data.py | 1164 +--------
 .../assets/articulation/base_articulation.py | 1147 +++++++++
 .../articulation/base_articulation_data.py | 962 ++++++++
 .../isaaclab/assets/rigid_object/__init__.py | 10 +
 .../assets/rigid_object/base_rigid_object.py | 447 ++++
 .../rigid_object/base_rigid_object_data.py | 640 +++++
 .../assets/rigid_object/rigid_object.py | 580 +----
 .../assets/rigid_object/rigid_object_data.py | 656 +----
 .../rigid_object_collection/__init__.py | 10 +
 .../base_rigid_object_collection.py | 464 ++++
 .../base_rigid_object_collection_data.py | 376 +++
 .../rigid_object_collection.py | 781 +-----
 .../rigid_object_collection_data.py | 514 +---
 .../mdp/actions/surface_gripper_actions.py | 2 +-
 source/isaaclab/isaaclab/envs/mdp/events.py | 3 +-
 .../isaaclab/scene/interactive_scene.py | 7 +-
 .../isaaclab/sim/simulation_manager.py | 1004 ++++++++
 .../isaaclab/isaaclab/utils/backend_utils.py | 79 +
 .../isaaclab/utils/wrench_composer.py | 9 +-
 .../test/assets/test_deformable_object.py | 2 +-
 .../test/assets/test_surface_gripper.py | 3 +-
 source/isaaclab_physx/config/extension.toml | 21 +
 source/isaaclab_physx/docs/CHANGELOG.rst | 10 +
 source/isaaclab_physx/docs/README.md | 1 +
 .../isaaclab_physx/isaaclab_physx/__init__.py | 19 +
 .../isaaclab_physx/assets/__init__.py | 59 +
 .../assets/articulation/__init__.py | 14 +
 .../assets/articulation/articulation.py | 2177 +++++++++++++++++
 .../assets/articulation/articulation_data.py | 1479 +++++++++++
 .../articulation/articulation_data_old.py | 1165 +++++++++
 .../assets/articulation/articulation_old.py | 2143 ++++++++++++++++
 .../assets/deformable_object/__init__.py | 6 +
 .../deformable_object/deformable_object.py | 2 +-
 .../deformable_object_cfg.py | 2 +-
 .../deformable_object_data.py | 0
 .../assets/rigid_object/__init__.py | 14 +
 .../assets/rigid_object/rigid_object.py | 698 ++++++
 .../assets/rigid_object/rigid_object_data.py | 840 +++++++
 .../rigid_object/rigid_object_data_old.py | 657 +++++
 .../assets/rigid_object/rigid_object_old.py | 582 +++++
 .../rigid_object_collection/__init__.py | 14 +
 .../rigid_object_collection.py | 845 +++++++
 .../rigid_object_collection_data.py | 910 +++++++
 .../rigid_object_collection_data_old.py | 515 ++++
 .../rigid_object_collection_old.py | 783 ++++++
 .../assets/surface_gripper/__init__.py | 5 +
 .../assets/surface_gripper/surface_gripper.py | 0
 .../surface_gripper/surface_gripper_cfg.py | 2 +-
 .../isaaclab_physx/sensors/__init__.py | 47 +
 .../isaaclab_physx/sensors/camera/__init__.py | 13 +
 .../isaaclab_physx/sensors/camera/camera.py | 722 ++++++
 .../sensors/camera/camera_cfg.py | 143 ++
 .../sensors/camera/camera_data.py | 92 +
 .../sensors/camera/tiled_camera.py | 425 ++++
 .../sensors/camera/tiled_camera_cfg.py | 16 +
 .../isaaclab_physx/sensors/camera/utils.py | 272 ++
 .../sensors/contact_sensor/__init__.py | 10 +
 .../sensors/contact_sensor/contact_sensor.py | 539 ++++
 .../contact_sensor/contact_sensor_cfg.py | 79 +
 .../contact_sensor/contact_sensor_data.py | 153 ++
 .../sensors/frame_transformer/__init__.py | 10 +
 .../frame_transformer/frame_transformer.py | 560 +++++
 .../frame_transformer_cfg.py | 76 +
 .../frame_transformer_data.py | 57 +
 .../isaaclab_physx/sensors/imu}/__init__.py | 8 +
 .../isaaclab_physx/sensors/imu/imu.py | 299 +++
 .../isaaclab_physx/sensors/imu/imu_cfg.py | 46 +
 .../isaaclab_physx/sensors/imu/imu_data.py | 57 +
 .../sensors/ray_caster/__init__.py | 29 +
 .../ray_caster/multi_mesh_ray_caster.py | 427 ++++
 .../multi_mesh_ray_caster_camera.py | 221 ++
 .../multi_mesh_ray_caster_camera_cfg.py | 35 +
 .../multi_mesh_ray_caster_camera_data.py | 23 +
 .../ray_caster/multi_mesh_ray_caster_cfg.py | 71 +
 .../ray_caster/multi_mesh_ray_caster_data.py | 22 +
 .../sensors/ray_caster/patterns/__init__.py | 9 +
 .../sensors/ray_caster/patterns/patterns.py | 180 ++
 .../ray_caster/patterns/patterns_cfg.py | 219 ++
 .../sensors/ray_caster/ray_cast_utils.py | 51 +
 .../sensors/ray_caster/ray_caster.py | 428 ++++
 .../sensors/ray_caster/ray_caster_camera.py | 456 ++++
 .../ray_caster/ray_caster_camera_cfg.py | 64 +
 .../sensors/ray_caster/ray_caster_cfg.py | 98 +
 .../sensors/ray_caster/ray_caster_data.py | 30 +
 .../isaaclab_physx/sensors/sensor_base.py | 363 +++
 .../isaaclab_physx/sensors/sensor_base_cfg.py | 42 +
 .../sensors/tacsl_sensor/__init__.py | 10 +
 source/isaaclab_physx/pyproject.toml | 3 +
 source/isaaclab_physx/setup.py | 61 +
 .../lift/config/franka/ik_abs_env_cfg.py | 2 +-
 .../config/openarm/lift_openarm_env_cfg.py | 3 +-
 .../manipulation/lift/lift_env_cfg.py | 3 +-
 .../config/galbot/stack_joint_pos_env_cfg.py | 3 +-
 .../ur10_gripper/stack_joint_pos_env_cfg.py | 3 +-
 101 files changed, 24698 insertions(+), 5819 deletions(-)
 create mode 100644 source/isaaclab/isaaclab/assets/articulation/base_articulation.py
 create mode 100644 source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py
 create mode 100644 source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py
 create mode 100644 source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py
 create mode 100644 source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py
 create mode 100644 source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py
 create mode 100644 source/isaaclab/isaaclab/sim/simulation_manager.py
 create mode 100644 source/isaaclab/isaaclab/utils/backend_utils.py
 create mode 100644 source/isaaclab_physx/config/extension.toml
 create mode 100644 source/isaaclab_physx/docs/CHANGELOG.rst
 create mode 100644 source/isaaclab_physx/docs/README.md
 create mode 100644 source/isaaclab_physx/isaaclab_physx/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/deformable_object/__init__.py (81%)
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/deformable_object/deformable_object.py (99%)
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/deformable_object/deformable_object_cfg.py (94%)
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/deformable_object/deformable_object_data.py (100%)
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/surface_gripper/__init__.py (84%)
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/surface_gripper/surface_gripper.py (100%)
 rename source/{isaaclab/isaaclab => isaaclab_physx/isaaclab_physx}/assets/surface_gripper/surface_gripper_cfg.py (94%)
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py
 rename source/{isaaclab/isaaclab/assets/utils => isaaclab_physx/isaaclab_physx/sensors/imu}/__init__.py (65%)
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py
 create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py
 create mode 100644
source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py create mode 100644 source/isaaclab_physx/pyproject.toml create mode 100644 source/isaaclab_physx/setup.py diff --git a/apps/isaaclab.python.headless.kit b/apps/isaaclab.python.headless.kit index 5e93d229c04..f5afd6eb16a 100644 --- a/apps/isaaclab.python.headless.kit +++ b/apps/isaaclab.python.headless.kit @@ -206,6 +206,7 @@ enabled=true # Enable this for DLSS # Load Isaac Lab extensions last "isaaclab" = {order = 1000} +"isaaclab_physx" = {order = 1000} "isaaclab_assets" = {order = 1000} "isaaclab_tasks" = {order = 1000} "isaaclab_mimic" = {order = 1000} diff --git a/apps/isaaclab.python.kit b/apps/isaaclab.python.kit index de4252f2995..12810aa5165 100644 --- a/apps/isaaclab.python.kit +++ b/apps/isaaclab.python.kit @@ -101,6 +101,7 @@ keywords = ["experience", "app", "usd"] # Load Isaac Lab extensions last "isaaclab" = {order = 1000} +"isaaclab_physx" = {order = 1000} "isaaclab_assets" = {order = 1000} "isaaclab_tasks" = {order = 1000} "isaaclab_mimic" = {order = 1000} diff --git a/scripts/demos/pick_and_place.py b/scripts/demos/pick_and_place.py index c98998de124..9c43b35ec9d 100644 --- a/scripts/demos/pick_and_place.py +++ b/scripts/demos/pick_and_place.py @@ -36,9 +36,8 @@ ArticulationCfg, RigidObject, RigidObjectCfg, - SurfaceGripper, - SurfaceGripperCfg, ) +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg from isaaclab.markers import SPHERE_MARKER_CFG, VisualizationMarkers from isaaclab.scene import InteractiveSceneCfg diff --git a/scripts/tutorials/01_assets/run_surface_gripper.py b/scripts/tutorials/01_assets/run_surface_gripper.py index 066dd9a077d..64711041fe2 100644 --- a/scripts/tutorials/01_assets/run_surface_gripper.py +++ b/scripts/tutorials/01_assets/run_surface_gripper.py @@ -36,7 +36,8 @@ import torch import isaaclab.sim as sim_utils -from isaaclab.assets import Articulation, SurfaceGripper, 
SurfaceGripperCfg +from isaaclab.assets import Articulation +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.sim import SimulationContext ## diff --git a/source/isaaclab/isaaclab/assets/__init__.py b/source/isaaclab/isaaclab/assets/__init__.py index 41640e5286f..c25a8919453 100644 --- a/source/isaaclab/isaaclab/assets/__init__.py +++ b/source/isaaclab/isaaclab/assets/__init__.py @@ -38,10 +38,28 @@ the corresponding actuator torques. """ -from .articulation import Articulation, ArticulationCfg, ArticulationData +from .articulation import BaseArticulation, BaseArticulationData, Articulation, ArticulationCfg, ArticulationData from .asset_base import AssetBase from .asset_base_cfg import AssetBaseCfg -from .deformable_object import DeformableObject, DeformableObjectCfg, DeformableObjectData -from .rigid_object import RigidObject, RigidObjectCfg, RigidObjectData -from .rigid_object_collection import RigidObjectCollection, RigidObjectCollectionCfg, RigidObjectCollectionData -from .surface_gripper import SurfaceGripper, SurfaceGripperCfg +from .rigid_object import BaseRigidObject, BaseRigidObjectData, RigidObject, RigidObjectCfg, RigidObjectData +from .rigid_object_collection import BaseRigidObjectCollection, BaseRigidObjectCollectionData, RigidObjectCollection, RigidObjectCollectionCfg, RigidObjectCollectionData + +__all__ = [ + "AssetBase", + "AssetBaseCfg", + "BaseArticulation", + "BaseArticulationData", + "Articulation", + "ArticulationCfg", + "ArticulationData", + "BaseRigidObject", + "BaseRigidObjectData", + "RigidObject", + "RigidObjectCfg", + "RigidObjectData", + "BaseRigidObjectCollection", + "BaseRigidObjectCollectionData", + "RigidObjectCollection", + "RigidObjectCollectionCfg", + "RigidObjectCollectionData", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/articulation/__init__.py b/source/isaaclab/isaaclab/assets/articulation/__init__.py index e24b3ba10f4..4e52538e2a3 100644 --- a/source/isaaclab/isaaclab/assets/articulation/__init__.py +++ b/source/isaaclab/isaaclab/assets/articulation/__init__.py @@ -5,6 +5,16 @@ """Sub-module for rigid articulated assets.""" +from .base_articulation import BaseArticulation +from .base_articulation_data import BaseArticulationData from .articulation import Articulation from .articulation_cfg import ArticulationCfg from .articulation_data import ArticulationData + +__all__ = [ + "BaseArticulation", + "BaseArticulationData", + "Articulation", + "ArticulationCfg", + "ArticulationData", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/articulation/articulation.py b/source/isaaclab/isaaclab/assets/articulation/articulation.py index b67ded15ac4..4efd7074585 100644 --- a/source/isaaclab/isaaclab/assets/articulation/articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/articulation.py @@ -3,2168 +3,28 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Flag for pyright to ignore type errors in this file. 
-# pyright: reportPrivateUsage=false - from __future__ import annotations -import logging -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch -import warp as wp -from prettytable import PrettyTable - -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema, UsdPhysics +from isaaclab.utils.backend_utils import FactoryBase -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils -from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator -from isaaclab.utils.types import ArticulationActions -from isaaclab.utils.version import get_isaac_sim_version -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .articulation_data import ArticulationData +from .base_articulation import BaseArticulation +from .base_articulation_data import BaseArticulationData if TYPE_CHECKING: - from .articulation_cfg import ArticulationCfg - -# import logger -logger = logging.getLogger(__name__) - - -class Articulation(AssetBase): - """An articulation asset class. - - An articulation is a collection of rigid bodies connected by joints. The joints can be either - fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. - However, the articulation class has currently been tested with revolute and prismatic joints. - The class supports both floating-base and fixed-base articulations. The type of articulation - is determined based on the root joint of the articulation. If the root joint is fixed, then - the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base - system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. - - For an asset to be considered an articulation, the root prim of the asset must have the - `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using - the reduced coordinate formulation. On playing the simulation, the physics engine parses the - articulation root prim and creates the corresponding articulation in the physics engine. The - articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. - - The articulation class also provides the functionality to augment the simulation of an articulated - system with custom actuator models. These models can either be explicit or implicit, as detailed in - the :mod:`isaaclab.actuators` module. The actuator models are specified using the - :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the - corresponding actuator models, when the simulation is played. - - During the simulation step, the articulation class first applies the actuator models to compute - the joint commands based on the user-specified targets. These joint commands are then applied - into the simulation. The joint commands can be either position, velocity, or effort commands. - As an example, the following snippet shows how this can be used for position commands: - - .. 
code-block:: python - - # an example instance of the articulation class - my_articulation = Articulation(cfg) - - # set joint position targets - my_articulation.set_joint_position_target(position) - # propagate the actuator models and apply the computed commands into the simulation - my_articulation.write_data_to_sim() - - # step the simulation using the simulation context - sim_context.step() - - # update the articulation state, where dt is the simulation time step - my_articulation.update(dt) - - .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html - - """ - - cfg: ArticulationCfg - """Configuration instance for the articulations.""" - - actuators: dict[str, ActuatorBase] - """Dictionary of actuator instances for the articulation. - - The keys are the actuator names and the values are the actuator instances. The actuator instances - are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` - attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. - """ - - def __init__(self, cfg: ArticulationCfg): - """Initialize the articulation. - - Args: - cfg: A configuration instance. - """ - super().__init__(cfg) - - """ - Properties - """ - - @property - def data(self) -> ArticulationData: - return self._data - - @property - def num_instances(self) -> int: - return self.root_physx_view.count - - @property - def is_fixed_base(self) -> bool: - """Whether the articulation is a fixed-base or floating-base system.""" - return self.root_physx_view.shared_metatype.fixed_base - - @property - def num_joints(self) -> int: - """Number of joints in articulation.""" - return self.root_physx_view.shared_metatype.dof_count - - @property - def num_fixed_tendons(self) -> int: - """Number of fixed tendons in articulation.""" - return self.root_physx_view.max_fixed_tendons - - @property - def num_spatial_tendons(self) -> int: - """Number of spatial tendons in articulation.""" - return self.root_physx_view.max_spatial_tendons - - @property - def num_bodies(self) -> int: - """Number of bodies in articulation.""" - return self.root_physx_view.shared_metatype.link_count - - @property - def joint_names(self) -> list[str]: - """Ordered names of joints in articulation.""" - return self.root_physx_view.shared_metatype.dof_names - - @property - def fixed_tendon_names(self) -> list[str]: - """Ordered names of fixed tendons in articulation.""" - return self._fixed_tendon_names - - @property - def spatial_tendon_names(self) -> list[str]: - """Ordered names of spatial tendons in articulation.""" - return self._spatial_tendon_names - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies in articulation.""" - return self.root_physx_view.shared_metatype.link_names - - @property - def root_physx_view(self) -> physx.ArticulationView: - """Articulation view for the asset (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. 
- - Note: - Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are - applied to the simulation. - """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - - Note: - Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are - applied to the simulation. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # use ellipses object to skip initial indices. - if env_ids is None: - env_ids = slice(None) - # reset actuators - for actuator in self.actuators.values(): - actuator.reset(env_ids) - # reset external wrenches. - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrenches and joint commands to the simulation. - - If any explicit actuators are present, then the actuator models are used to compute the - joint commands. Otherwise, the joint commands are directly set into the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_BODY_INDICES_WP, - env_ids=self._ALL_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - # apply actuator models - self._apply_actuator_model() - # write actions into simulation - self.root_physx_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES) - # position and velocity targets only for implicit actuators - if self._has_implicit_actuators: - self.root_physx_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES) - self.root_physx_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES) - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. 
- """ - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - def find_joints( - self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find joints in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the joint names. - joint_subset: A subset of joints to search for. Defaults to None, which means all joints - in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the joint indices and names. - """ - if joint_subset is None: - joint_subset = self.joint_names - # find joints - return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order) - - def find_fixed_tendons( - self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find fixed tendons in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the joint - names with fixed tendons. - tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means - all joints in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the tendon indices and names. - """ - if tendon_subsets is None: - # tendons follow the joint names they are attached to - tendon_subsets = self.fixed_tendon_names - # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) - - def find_spatial_tendons( - self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find spatial tendons in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the tendon names. - tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons - in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the tendon indices and names. 
- """ - if tendon_subsets is None: - tendon_subsets = self.spatial_tendon_names - # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) - - """ - Operations - State Writers. - """ - - def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) - - def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. 
- # set into internal buffers - self._data.root_link_pose_w[env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - - # convert root quaternion from wxyz to xyzw - root_poses_xyzw = self._data.root_link_pose_w.clone() - root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") - - # Need to invalidate the buffer to trigger the update with the new state. - self._data._body_link_pose_w.timestamp = -1.0 - self._data._body_com_pose_w.timestamp = -1.0 - self._data._body_state_w.timestamp = -1.0 - self._data._body_link_state_w.timestamp = -1.0 - self._data._body_com_state_w.timestamp = -1.0 - - # set into simulation - self.root_physx_view.set_root_transforms(root_poses_xyzw, indices=physx_env_ids) - - def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - The orientation is the orientation of the principle axes of inertia. - - Args: - root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_com_pose_w[local_env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids] - - # get CoM pose in link frame - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :] - # transform input CoM pose to link frame - root_link_pos, root_link_quat = math_utils.combine_frame_transforms( - root_pose[..., :3], - root_pose[..., 3:7], - math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), - math_utils.quat_inv(com_quat_b), - ) - root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1) - - # write transformed pose in link frame to sim - self.write_root_link_pose_to_sim(root_pose=root_link_pose, env_ids=env_ids) - - def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids) - - def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. 
- - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_com_vel_w[env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - # make the acceleration zero to prevent reporting old values - self._data.body_acc_w[env_ids] = 0.0 - - # set into simulation - self.root_physx_view.set_root_velocities(self._data.root_com_vel_w, indices=physx_env_ids) - - def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's frame rather than the roots center of mass. - - Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] - - # get CoM pose in link frame - quat = self.data.root_link_quat_w[local_env_ids] - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - # transform input velocity to center of mass frame - root_com_velocity = root_velocity.clone() - root_com_velocity[:, :3] += torch.linalg.cross( - root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write transformed velocity in CoM frame to sim - self.write_root_com_velocity_to_sim(root_velocity=root_com_velocity, env_ids=env_ids) - - def write_joint_state_to_sim( - self, - position: torch.Tensor, - velocity: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint positions and velocities to the simulation. - - Args: - position: Joint positions. Shape is (len(env_ids), len(joint_ids)). - velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). 
- """ - # set into simulation - self.write_joint_position_to_sim(position, joint_ids=joint_ids, env_ids=env_ids) - self.write_joint_velocity_to_sim(velocity, joint_ids=joint_ids, env_ids=env_ids) - - def write_joint_position_to_sim( - self, - position: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint positions to the simulation. - - Args: - position: Joint positions. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_pos[env_ids, joint_ids] = position - # Need to invalidate the buffer to trigger the update with the new root pose. - self._data._body_com_vel_w.timestamp = -1.0 - self._data._body_link_vel_w.timestamp = -1.0 - self._data._body_com_pose_b.timestamp = -1.0 - self._data._body_com_pose_w.timestamp = -1.0 - self._data._body_link_pose_w.timestamp = -1.0 - - self._data._body_state_w.timestamp = -1.0 - self._data._body_link_state_w.timestamp = -1.0 - self._data._body_com_state_w.timestamp = -1.0 - # set into simulation - self.root_physx_view.set_dof_positions(self._data.joint_pos, indices=physx_env_ids) - - def write_joint_velocity_to_sim( - self, - velocity: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint velocities to the simulation. - - Args: - velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_vel[env_ids, joint_ids] = velocity - self._data._previous_joint_vel[env_ids, joint_ids] = velocity - self._data.joint_acc[env_ids, joint_ids] = 0.0 - # set into simulation - self.root_physx_view.set_dof_velocities(self._data.joint_vel, indices=physx_env_ids) - - """ - Operations - Simulation Parameters Writers. - """ - - def write_joint_stiffness_to_sim( - self, - stiffness: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint stiffness into the simulation. - - Args: - stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. 
(#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_stiffness[env_ids, joint_ids] = stiffness - # set into simulation - self.root_physx_view.set_dof_stiffnesses(self._data.joint_stiffness.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_damping_to_sim( - self, - damping: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint damping into the simulation. - - Args: - damping: Joint damping. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the damping for. Defaults to None (all joints). - env_ids: The environment indices to set the damping for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. (#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_damping[env_ids, joint_ids] = damping - # set into simulation - self.root_physx_view.set_dof_dampings(self._data.joint_damping.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_position_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - warn_limit_violation: bool = True, - ): - """Write joint position limits into the simulation. - - Args: - limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2). - joint_ids: The joint indices to set the limits for. Defaults to None (all joints). - env_ids: The environment indices to set the limits for. Defaults to None (all environments). - warn_limit_violation: Whether to use warning or info level logging when default joint positions - exceed the new limits. Defaults to True. - """ - # note: This function isn't setting the values for actuator models. (#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_pos_limits[env_ids, joint_ids] = limits - # update default joint pos to stay within the new limits - if torch.any( - (self._data.default_joint_pos[env_ids, joint_ids] < limits[..., 0]) - | (self._data.default_joint_pos[env_ids, joint_ids] > limits[..., 1]) - ): - self._data.default_joint_pos[env_ids, joint_ids] = torch.clamp( - self._data.default_joint_pos[env_ids, joint_ids], limits[..., 0], limits[..., 1] - ) - violation_message = ( - "Some default joint positions are outside of the range of the new joint limits. Default joint positions" - " will be clamped to be within the new joint limits." 
- ) - if warn_limit_violation: - # warn level will show in console - logger.warning(violation_message) - else: - # info level is only written to log file - logger.info(violation_message) - # set into simulation - self.root_physx_view.set_dof_limits(self._data.joint_pos_limits.cpu(), indices=physx_env_ids.cpu()) - - # compute the soft limits based on the joint limits - # TODO: Optimize this computation for only selected joints - # soft joint position limits (recommended not to be too close to limits). - joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 - joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] - soft_limit_factor = self.cfg.soft_joint_pos_limit_factor - # add to data - self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor - self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor - - def write_joint_velocity_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint max velocity to the simulation. - - The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only - be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving - faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. - - Args: - limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the max velocity for. Defaults to None (all joints). - env_ids: The environment indices to set the max velocity for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # move tensor to cpu if needed - if isinstance(limits, torch.Tensor): - limits = limits.to(self.device) - # set into internal buffers - self._data.joint_vel_limits[env_ids, joint_ids] = limits - # set into simulation - self.root_physx_view.set_dof_max_velocities(self._data.joint_vel_limits.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_effort_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint effort limits into the simulation. - - The effort limit is used to constrain the computed joint efforts in the physics engine. If the - computed effort exceeds this limit, the physics engine will clip the effort to this value. - - Args: - limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). - env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. 
(#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # move tensor to cpu if needed - if isinstance(limits, torch.Tensor): - limits = limits.to(self.device) - # set into internal buffers - self._data.joint_effort_limits[env_ids, joint_ids] = limits - # set into simulation - self.root_physx_view.set_dof_max_forces(self._data.joint_effort_limits.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_armature_to_sim( - self, - armature: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint armature into the simulation. - - The armature is directly added to the corresponding joint-space inertia. It helps improve the - simulation stability by reducing the joint velocities. - - Args: - armature: Joint armature. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). - env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_armature[env_ids, joint_ids] = armature - # set into simulation - self.root_physx_view.set_dof_armatures(self._data.joint_armature.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_friction_coefficient_to_sim( - self, - joint_friction_coeff: torch.Tensor | float, - joint_dynamic_friction_coeff: torch.Tensor | float | None = None, - joint_viscous_friction_coeff: torch.Tensor | float | None = None, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - r"""Write joint friction coefficients into the simulation. - - For Isaac Sim versions below 5.0, only the static friction coefficient is set. - This limits the resisting force or torque up to a maximum proportional to the transmitted - spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. - - For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients - are set. The model combines Coulomb (static & dynamic) friction with a viscous term: - - - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. - - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. - - Viscous friction :math:`c_v` is a velocity-proportional resistive term. - - Args: - joint_friction_coeff: Static friction coefficient :math:`\mu_s`. - Shape is (len(env_ids), len(joint_ids)). Scalars are broadcast to all selections. - joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. - Same shape as above. If None, the dynamic coefficient is not updated. - joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. - Same shape as above. If None, the viscous coefficient is not updated. - joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints). 
- env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_friction_coeff[env_ids, joint_ids] = joint_friction_coeff - - # if dynamic or viscous friction coeffs are provided, set them too - if joint_dynamic_friction_coeff is not None: - self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff - if joint_viscous_friction_coeff is not None: - self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff - - # move the indices to cpu - physx_envs_ids_cpu = physx_env_ids.cpu() - - # set into simulation - if get_isaac_sim_version().major < 5: - self.root_physx_view.set_dof_friction_coefficients( - self._data.joint_friction_coeff.cpu(), indices=physx_envs_ids_cpu - ) - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_envs_ids_cpu, :, 0] = self._data.joint_friction_coeff[physx_envs_ids_cpu, :].cpu() - - # only set dynamic and viscous friction if provided - if joint_dynamic_friction_coeff is not None: - friction_props[physx_envs_ids_cpu, :, 1] = self._data.joint_dynamic_friction_coeff[ - physx_envs_ids_cpu, : - ].cpu() - - # only set viscous friction if provided - if joint_viscous_friction_coeff is not None: - friction_props[physx_envs_ids_cpu, :, 2] = self._data.joint_viscous_friction_coeff[ - physx_envs_ids_cpu, : - ].cpu() - - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_envs_ids_cpu) - - def write_joint_dynamic_friction_coefficient_to_sim( - self, - joint_dynamic_friction_coeff: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - if get_isaac_sim_version().major < 5: - logger.warning("Setting joint dynamic friction coefficients are not supported in Isaac Sim < 5.0") - return - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff - # set into simulation - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_env_ids.cpu(), :, 1] = self._data.joint_dynamic_friction_coeff[physx_env_ids, :].cpu() - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) - - def write_joint_viscous_friction_coefficient_to_sim( - self, - joint_viscous_friction_coeff: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - if get_isaac_sim_version().major < 5: - logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") - return - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if 
needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff - # set into simulation - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_env_ids.cpu(), :, 2] = self._data.joint_viscous_friction_coeff[physx_env_ids, :].cpu() - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - body_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the asset's bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the - external wrench at (in the local link frame of the bodies). - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. - body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the articulations' bodies. - """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - if forces is None and torques is None: - logger.warning("No forces or torques provided. 
No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- body_ids - if body_ids is None: - body_ids = self._ALL_BODY_INDICES_WP - elif isinstance(body_ids, slice): - body_ids = wp.from_torch( - torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 - ) - elif not isinstance(body_ids, torch.Tensor): - body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) - else: - body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=body_ids, - env_ids=env_ids, - is_global=is_global, - ) - - def set_joint_position_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint position targets into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_pos_target[env_ids, joint_ids] = target - - def set_joint_velocity_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint velocity targets into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_vel_target[env_ids, joint_ids] = target - - def set_joint_effort_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint efforts into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. 
To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_effort_target[env_ids, joint_ids] = target - - """ - Operations - Tendons. - """ - - def set_fixed_tendon_stiffness( - self, - stiffness: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon stiffness into internal buffers. - - This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set stiffness - self._data.fixed_tendon_stiffness[env_ids, fixed_tendon_ids] = stiffness - - def set_fixed_tendon_damping( - self, - damping: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon damping into internal buffers. - - This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the damping for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set damping - self._data.fixed_tendon_damping[env_ids, fixed_tendon_ids] = damping - - def set_fixed_tendon_limit_stiffness( - self, - limit_stiffness: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon limit stiffness efforts into internal buffers. - - This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). 
- env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit_stiffness - self._data.fixed_tendon_limit_stiffness[env_ids, fixed_tendon_ids] = limit_stiffness - - def set_fixed_tendon_position_limit( - self, - limit: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon position limits into internal buffers. - - This function does not apply the tendon limit to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the limit for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit - self._data.fixed_tendon_pos_limits[env_ids, fixed_tendon_ids] = limit - - def set_fixed_tendon_rest_length( - self, - rest_length: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon rest lengths into internal buffers. - - This function does not apply the tendon rest length to the simulation. It only fills the buffers with - the desired values. To apply the tendon rest length, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the rest length for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set rest_length - self._data.fixed_tendon_rest_length[env_ids, fixed_tendon_ids] = rest_length - - def set_fixed_tendon_offset( - self, - offset: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon offsets into internal buffers. - - This function does not apply the tendon offset to the simulation. It only fills the buffers with - the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the offset for. Defaults to None (all environments). 
- """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set offset - self._data.fixed_tendon_offset[env_ids, fixed_tendon_ids] = offset - - def write_fixed_tendon_properties_to_sim( - self, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write fixed tendon properties into the simulation. - - Args: - fixed_tendon_ids: The fixed tendon indices to set the limits for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the limits for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - physx_env_ids = self._ALL_INDICES - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - - # set into simulation - self.root_physx_view.set_fixed_tendon_properties( - self._data.fixed_tendon_stiffness, - self._data.fixed_tendon_damping, - self._data.fixed_tendon_limit_stiffness, - self._data.fixed_tendon_pos_limits, - self._data.fixed_tendon_rest_length, - self._data.fixed_tendon_offset, - indices=physx_env_ids, - ) - - def set_spatial_tendon_stiffness( - self, - stiffness: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon stiffness into internal buffers. - - This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set stiffness - self._data.spatial_tendon_stiffness[env_ids, spatial_tendon_ids] = stiffness - - def set_spatial_tendon_damping( - self, - damping: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon damping into internal buffers. - - This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the damping for. Defaults to None, which means all environments. - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." 
- ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set damping - self._data.spatial_tendon_damping[env_ids, spatial_tendon_ids] = damping - - def set_spatial_tendon_limit_stiffness( - self, - limit_stiffness: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon limit stiffness into internal buffers. - - This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit stiffness - self._data.spatial_tendon_limit_stiffness[env_ids, spatial_tendon_ids] = limit_stiffness - - def set_spatial_tendon_offset( - self, - offset: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon offset efforts into internal buffers. - - This function does not apply the tendon offset to the simulation. It only fills the buffers with - the desired values. To apply the tendon offset, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). - env_ids: The environment indices to set the offset for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set offset - self._data.spatial_tendon_offset[env_ids, spatial_tendon_ids] = offset - - def write_spatial_tendon_properties_to_sim( - self, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write spatial tendon properties into the simulation. - - Args: - spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the properties for. Defaults to None, - which means all environments. 
- """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - physx_env_ids = self._ALL_INDICES - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - - # set into simulation - self.root_physx_view.set_spatial_tendon_properties( - self._data.spatial_tendon_stiffness, - self._data.spatial_tendon_damping, - self._data.spatial_tendon_limit_stiffness, - self._data.spatial_tendon_offset, - indices=physx_env_ids, - ) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - - if self.cfg.articulation_root_prim_path is not None: - # The articulation root prim path is specified explicitly, so we can just use this. - root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path - else: - # No articulation root prim path was specified, so we need to search - # for it. We search for this in the first environment and then - # create a regex that matches all environments. - first_env_matching_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if first_env_matching_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") - first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString - - # Find all articulation root prims in the first environment. - first_env_root_prims = sim_utils.get_all_matching_child_prims( - first_env_matching_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(first_env_root_prims) == 0: - raise RuntimeError( - f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." - " Please ensure that the prim has 'USD ArticulationRootAPI' applied." - ) - if len(first_env_root_prims) > 1: - raise RuntimeError( - f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." - f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." - " Please ensure that there is only one articulation in the prim path tree." - ) - - # Now we convert the found articulation root from the first - # environment back into a regex that matches all environments. - first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString - root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] - root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path - - # -- articulation - self._root_physx_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) - - # check if the articulation was created - if self._root_physx_view._backend is None: - raise RuntimeError(f"Failed to create articulation at: {root_prim_path_expr}. 
Please check PhysX logs.") - - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0: patching spatial-tendon getter" - " and setter to use dummy value" - ) - self._root_physx_view.max_spatial_tendons = 0 - self._root_physx_view.get_spatial_tendon_stiffnesses = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_dampings = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_limit_stiffnesses = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_offsets = lambda: torch.empty(0, device=self.device) - self._root_physx_view.set_spatial_tendon_properties = lambda *args, **kwargs: logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0: Calling" - " set_spatial_tendon_properties has no effect" - ) - - # log information about the articulation - logger.info(f"Articulation initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") - logger.info(f"Is fixed root: {self.is_fixed_base}") - logger.info(f"Number of bodies: {self.num_bodies}") - logger.info(f"Body names: {self.body_names}") - logger.info(f"Number of joints: {self.num_joints}") - logger.info(f"Joint names: {self.joint_names}") - logger.info(f"Number of fixed tendons: {self.num_fixed_tendons}") - - # container for data access - self._data = ArticulationData(self.root_physx_view, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - self._process_actuators_cfg() - self._process_tendons() - # validate configuration - self._validate_cfg() - # update the robot data - self.update(0.0) - # log joint information - self._log_articulation_info() - - def _create_buffers(self): - # constants - self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_BODY_INDICES = torch.arange(self.num_bodies, dtype=torch.long, device=self.device) - self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_BODY_INDICES_WP = wp.from_torch(self._ALL_BODY_INDICES.to(torch.int32), dtype=wp.int32) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) - - # asset named data - self._data.joint_names = self.joint_names - self._data.body_names = self.body_names - # tendon names are set in _process_tendons function - - # -- joint properties - self._data.default_joint_pos_limits = self.root_physx_view.get_dof_limits().to(self.device).clone() - self._data.default_joint_stiffness = self.root_physx_view.get_dof_stiffnesses().to(self.device).clone() - self._data.default_joint_damping = self.root_physx_view.get_dof_dampings().to(self.device).clone() - self._data.default_joint_armature = self.root_physx_view.get_dof_armatures().to(self.device).clone() - if get_isaac_sim_version().major < 5: - self._data.default_joint_friction_coeff = ( - self.root_physx_view.get_dof_friction_coefficients().to(self.device).clone() - ) - self._data.default_joint_dynamic_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) - self._data.default_joint_viscous_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - self._data.default_joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() - self._data.default_joint_dynamic_friction_coeff = 
friction_props[:, :, 1].to(self.device).clone() - self._data.default_joint_viscous_friction_coeff = friction_props[:, :, 2].to(self.device).clone() - - self._data.joint_pos_limits = self._data.default_joint_pos_limits.clone() - self._data.joint_vel_limits = self.root_physx_view.get_dof_max_velocities().to(self.device).clone() - self._data.joint_effort_limits = self.root_physx_view.get_dof_max_forces().to(self.device).clone() - self._data.joint_stiffness = self._data.default_joint_stiffness.clone() - self._data.joint_damping = self._data.default_joint_damping.clone() - self._data.joint_armature = self._data.default_joint_armature.clone() - self._data.joint_friction_coeff = self._data.default_joint_friction_coeff.clone() - self._data.joint_dynamic_friction_coeff = self._data.default_joint_dynamic_friction_coeff.clone() - self._data.joint_viscous_friction_coeff = self._data.default_joint_viscous_friction_coeff.clone() - - # -- body properties - self._data.default_mass = self.root_physx_view.get_masses().clone() - self._data.default_inertia = self.root_physx_view.get_inertias().clone() - - # -- joint commands (sent to the actuator from the user) - self._data.joint_pos_target = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.joint_vel_target = torch.zeros_like(self._data.joint_pos_target) - self._data.joint_effort_target = torch.zeros_like(self._data.joint_pos_target) - # -- joint commands (sent to the simulation after actuator processing) - self._joint_pos_target_sim = torch.zeros_like(self._data.joint_pos_target) - self._joint_vel_target_sim = torch.zeros_like(self._data.joint_pos_target) - self._joint_effort_target_sim = torch.zeros_like(self._data.joint_pos_target) - - # -- computed joint efforts from the actuator models - self._data.computed_torque = torch.zeros_like(self._data.joint_pos_target) - self._data.applied_torque = torch.zeros_like(self._data.joint_pos_target) - - # -- other data that are filled based on explicit actuator models - self._data.soft_joint_vel_limits = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.gear_ratio = torch.ones(self.num_instances, self.num_joints, device=self.device) - - # soft joint position limits (recommended not to be too close to limits). - joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 - joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] - soft_limit_factor = self.cfg.soft_joint_pos_limit_factor - # add to data - self._data.soft_joint_pos_limits = torch.zeros(self.num_instances, self.num_joints, 2, device=self.device) - self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor - self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor - - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- root state - # note: we cast to tuple to avoid torch/numpy type mismatch. 
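# --- Illustrative sketch, not part of the patch: soft joint position limits ---
# The `_create_buffers` code above shrinks the hard joint limits symmetrically about their
# midpoint by `cfg.soft_joint_pos_limit_factor`. The example limits and the 0.9 factor below
# are assumptions chosen only to show the arithmetic.
import torch

example_limits = torch.tensor([[[-1.0, 1.0], [-0.5, 2.5]]])  # (num_envs, num_joints, 2)
soft_limit_factor = 0.9  # assumed value of cfg.soft_joint_pos_limit_factor

mid = (example_limits[..., 0] + example_limits[..., 1]) / 2
span = example_limits[..., 1] - example_limits[..., 0]
soft_lower = mid - 0.5 * span * soft_limit_factor  # tensor([[-0.90, -0.35]])
soft_upper = mid + 0.5 * span * soft_limit_factor  # tensor([[ 0.90,  2.35]])
# --- end of sketch ---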
- default_root_state = ( - tuple(self.cfg.init_state.pos) - + tuple(self.cfg.init_state.rot) - + tuple(self.cfg.init_state.lin_vel) - + tuple(self.cfg.init_state.ang_vel) - ) - default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) - self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) - - # -- joint state - self._data.default_joint_pos = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.default_joint_vel = torch.zeros_like(self._data.default_joint_pos) - # joint pos - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_pos, self.joint_names - ) - self._data.default_joint_pos[:, indices_list] = torch.tensor(values_list, device=self.device) - # joint vel - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_vel, self.joint_names - ) - self._data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - self._root_physx_view = None - - """ - Internal helpers -- Actuators. - """ - - def _process_actuators_cfg(self): - """Process and apply articulation joint properties.""" - # create actuators - self.actuators = dict() - # flag for implicit actuators - # if this is false, we by-pass certain checks when doing actuator-related operations - self._has_implicit_actuators = False - - # iterate over all actuator configurations - for actuator_name, actuator_cfg in self.cfg.actuators.items(): - # type annotation for type checkers - actuator_cfg: ActuatorBaseCfg - # create actuator group - joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) - # check if any joints are found - if len(joint_names) == 0: - raise ValueError( - f"No joints found for actuator group: {actuator_name} with joint name expression:" - f" {actuator_cfg.joint_names_expr}." - ) - # resolve joint indices - # we pass a slice if all joints are selected to avoid indexing overhead - if len(joint_names) == self.num_joints: - joint_ids = slice(None) - else: - joint_ids = torch.tensor(joint_ids, device=self.device) - # create actuator collection - # note: for efficiency avoid indexing when over all indices - actuator: ActuatorBase = actuator_cfg.class_type( - cfg=actuator_cfg, - joint_names=joint_names, - joint_ids=joint_ids, - num_envs=self.num_instances, - device=self.device, - stiffness=self._data.default_joint_stiffness[:, joint_ids], - damping=self._data.default_joint_damping[:, joint_ids], - armature=self._data.default_joint_armature[:, joint_ids], - friction=self._data.default_joint_friction_coeff[:, joint_ids], - dynamic_friction=self._data.default_joint_dynamic_friction_coeff[:, joint_ids], - viscous_friction=self._data.default_joint_viscous_friction_coeff[:, joint_ids], - effort_limit=self._data.joint_effort_limits[:, joint_ids].clone(), - velocity_limit=self._data.joint_vel_limits[:, joint_ids], - ) - # log information on actuator groups - model_type = "implicit" if actuator.is_implicit_model else "explicit" - logger.info( - f"Actuator collection: {actuator_name} with model '{actuator_cfg.class_type.__name__}'" - f" (type: {model_type}) and joint names: {joint_names} [{joint_ids}]." 
- ) - # store actuator group - self.actuators[actuator_name] = actuator - # set the passed gains and limits into the simulation - if isinstance(actuator, ImplicitActuator): - self._has_implicit_actuators = True - # the gains and limits are set into the simulation since actuator model is implicit - self.write_joint_stiffness_to_sim(actuator.stiffness, joint_ids=actuator.joint_indices) - self.write_joint_damping_to_sim(actuator.damping, joint_ids=actuator.joint_indices) - else: - # the gains and limits are processed by the actuator model - # we set gains to zero, and torque limit to a high value in simulation to avoid any interference - self.write_joint_stiffness_to_sim(0.0, joint_ids=actuator.joint_indices) - self.write_joint_damping_to_sim(0.0, joint_ids=actuator.joint_indices) - - # Set common properties into the simulation - self.write_joint_effort_limit_to_sim(actuator.effort_limit_sim, joint_ids=actuator.joint_indices) - self.write_joint_velocity_limit_to_sim(actuator.velocity_limit_sim, joint_ids=actuator.joint_indices) - self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices) - self.write_joint_friction_coefficient_to_sim(actuator.friction, joint_ids=actuator.joint_indices) - if get_isaac_sim_version().major >= 5: - self.write_joint_dynamic_friction_coefficient_to_sim( - actuator.dynamic_friction, joint_ids=actuator.joint_indices - ) - self.write_joint_viscous_friction_coefficient_to_sim( - actuator.viscous_friction, joint_ids=actuator.joint_indices - ) - - # Store the configured values from the actuator model - # note: this is the value configured in the actuator model (for implicit and explicit actuators) - self._data.default_joint_stiffness[:, actuator.joint_indices] = actuator.stiffness - self._data.default_joint_damping[:, actuator.joint_indices] = actuator.damping - self._data.default_joint_armature[:, actuator.joint_indices] = actuator.armature - self._data.default_joint_friction_coeff[:, actuator.joint_indices] = actuator.friction - if get_isaac_sim_version().major >= 5: - self._data.default_joint_dynamic_friction_coeff[:, actuator.joint_indices] = actuator.dynamic_friction - self._data.default_joint_viscous_friction_coeff[:, actuator.joint_indices] = actuator.viscous_friction - - # perform some sanity checks to ensure actuators are prepared correctly - total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) - if total_act_joints != (self.num_joints - self.num_fixed_tendons): - logger.warning( - "Not all actuators are configured! Total number of actuated joints not equal to number of" - f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." 
- ) - - if self.cfg.actuator_value_resolution_debug_print: - t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActutatorCfg Value", "Applied"]) - for actuator_group, actuator in self.actuators.items(): - group_count = 0 - for property, resolution_details in actuator.joint_property_resolution_table.items(): - for prop_idx, resolution_detail in enumerate(resolution_details): - actuator_group_str = actuator_group if group_count == 0 else "" - property_str = property if prop_idx == 0 else "" - fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail] - t.add_row([actuator_group_str, property_str, *fmt]) - group_count += 1 - logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}") - - def _process_tendons(self): - """Process fixed and spatial tendons.""" - # create a list to store the fixed tendon names - self._fixed_tendon_names = list() - self._spatial_tendon_names = list() - # parse fixed tendons properties if they exist - if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0: - joint_paths = self.root_physx_view.dof_paths[0] - - # iterate over all joints to find tendons attached to them - for j in range(self.num_joints): - usd_joint_path = joint_paths[j] - # check whether joint has tendons - tendon name follows the joint name it is attached to - joint = UsdPhysics.Joint.Get(self.stage, usd_joint_path) - if joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAxisRootAPI): - joint_name = usd_joint_path.split("/")[-1] - self._fixed_tendon_names.append(joint_name) - elif joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAttachmentRootAPI) or joint.GetPrim().HasAPI( - PhysxSchema.PhysxTendonAttachmentLeafAPI - ): - joint_name = usd_joint_path.split("/")[-1] - self._spatial_tendon_names.append(joint_name) - - # store the fixed tendon names - self._data.fixed_tendon_names = self._fixed_tendon_names - self._data.spatial_tendon_names = self._spatial_tendon_names - # store the current USD fixed tendon properties - self._data.default_fixed_tendon_stiffness = self.root_physx_view.get_fixed_tendon_stiffnesses().clone() - self._data.default_fixed_tendon_damping = self.root_physx_view.get_fixed_tendon_dampings().clone() - self._data.default_fixed_tendon_limit_stiffness = ( - self.root_physx_view.get_fixed_tendon_limit_stiffnesses().clone() - ) - self._data.default_fixed_tendon_pos_limits = self.root_physx_view.get_fixed_tendon_limits().clone() - self._data.default_fixed_tendon_rest_length = self.root_physx_view.get_fixed_tendon_rest_lengths().clone() - self._data.default_fixed_tendon_offset = self.root_physx_view.get_fixed_tendon_offsets().clone() - self._data.default_spatial_tendon_stiffness = self.root_physx_view.get_spatial_tendon_stiffnesses().clone() - self._data.default_spatial_tendon_damping = self.root_physx_view.get_spatial_tendon_dampings().clone() - self._data.default_spatial_tendon_limit_stiffness = ( - self.root_physx_view.get_spatial_tendon_limit_stiffnesses().clone() - ) - self._data.default_spatial_tendon_offset = self.root_physx_view.get_spatial_tendon_offsets().clone() - - # store a copy of the default values for the fixed tendons - self._data.fixed_tendon_stiffness = self._data.default_fixed_tendon_stiffness.clone() - self._data.fixed_tendon_damping = self._data.default_fixed_tendon_damping.clone() - self._data.fixed_tendon_limit_stiffness = self._data.default_fixed_tendon_limit_stiffness.clone() - self._data.fixed_tendon_pos_limits = self._data.default_fixed_tendon_pos_limits.clone() - 
self._data.fixed_tendon_rest_length = self._data.default_fixed_tendon_rest_length.clone() - self._data.fixed_tendon_offset = self._data.default_fixed_tendon_offset.clone() - self._data.spatial_tendon_stiffness = self._data.default_spatial_tendon_stiffness.clone() - self._data.spatial_tendon_damping = self._data.default_spatial_tendon_damping.clone() - self._data.spatial_tendon_limit_stiffness = self._data.default_spatial_tendon_limit_stiffness.clone() - self._data.spatial_tendon_offset = self._data.default_spatial_tendon_offset.clone() - - def _apply_actuator_model(self): - """Processes joint commands for the articulation by forwarding them to the actuators. - - The actions are first processed using actuator models. Depending on the robot configuration, - the actuator models compute the joint level simulation commands and sets them into the PhysX buffers. - """ - # process actions per group - for actuator in self.actuators.values(): - # prepare input for actuator model based on cached data - # TODO : A tensor dict would be nice to do the indexing of all tensors together - control_action = ArticulationActions( - joint_positions=self._data.joint_pos_target[:, actuator.joint_indices], - joint_velocities=self._data.joint_vel_target[:, actuator.joint_indices], - joint_efforts=self._data.joint_effort_target[:, actuator.joint_indices], - joint_indices=actuator.joint_indices, - ) - # compute joint command from the actuator model - control_action = actuator.compute( - control_action, - joint_pos=self._data.joint_pos[:, actuator.joint_indices], - joint_vel=self._data.joint_vel[:, actuator.joint_indices], - ) - # update targets (these are set into the simulation) - if control_action.joint_positions is not None: - self._joint_pos_target_sim[:, actuator.joint_indices] = control_action.joint_positions - if control_action.joint_velocities is not None: - self._joint_vel_target_sim[:, actuator.joint_indices] = control_action.joint_velocities - if control_action.joint_efforts is not None: - self._joint_effort_target_sim[:, actuator.joint_indices] = control_action.joint_efforts - # update state of the actuator model - # -- torques - self._data.computed_torque[:, actuator.joint_indices] = actuator.computed_effort - self._data.applied_torque[:, actuator.joint_indices] = actuator.applied_effort - # -- actuator data - self._data.soft_joint_vel_limits[:, actuator.joint_indices] = actuator.velocity_limit - # TODO: find a cleaner way to handle gear ratio. Only needed for variable gear ratio actuators. - if hasattr(actuator, "gear_ratio"): - self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio - - """ - Internal helpers -- Debugging. - """ - - def _validate_cfg(self): - """Validate the configuration after processing. - - Note: - This function should be called only after the configuration has been processed and the buffers have been - created. Otherwise, some settings that are altered during processing may not be validated. - For instance, the actuator models may change the joint max velocity limits. 
- """ - # check that the default values are within the limits - joint_pos_limits = self.root_physx_view.get_dof_limits()[0].to(self.device) - out_of_range = self._data.default_joint_pos[0] < joint_pos_limits[:, 0] - out_of_range |= self._data.default_joint_pos[0] > joint_pos_limits[:, 1] - violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) - # throw error if any of the default joint positions are out of the limits - if len(violated_indices) > 0: - # prepare message for violated joints - msg = "The following joints have default positions out of the limits: \n" - for idx in violated_indices: - joint_name = self.data.joint_names[idx] - joint_limit = joint_pos_limits[idx] - joint_pos = self.data.default_joint_pos[0, idx] - # add to message - msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" - raise ValueError(msg) - - # check that the default joint velocities are within the limits - joint_max_vel = self.root_physx_view.get_dof_max_velocities()[0].to(self.device) - out_of_range = torch.abs(self._data.default_joint_vel[0]) > joint_max_vel - violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) - if len(violated_indices) > 0: - # prepare message for violated joints - msg = "The following joints have default velocities out of the limits: \n" - for idx in violated_indices: - joint_name = self.data.joint_names[idx] - joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] - joint_vel = self.data.default_joint_vel[0, idx] - # add to message - msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" - raise ValueError(msg) - - def _log_articulation_info(self): - """Log information about the articulation. - - Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. 
- """ - - # define custom formatters for large numbers and limit ranges - def format_large_number(_, v: float) -> str: - """Format large numbers using scientific notation.""" - if abs(v) >= 1e3: - return f"{v:.1e}" - else: - return f"{v:.3f}" - - def format_limits(_, v: tuple[float, float]) -> str: - """Format limit ranges using scientific notation.""" - if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: - return f"[{v[0]:.1e}, {v[1]:.1e}]" - else: - return f"[{v[0]:.3f}, {v[1]:.3f}]" - - # read out all joint parameters from simulation - # -- gains - stiffnesses = self.root_physx_view.get_dof_stiffnesses()[0].tolist() - dampings = self.root_physx_view.get_dof_dampings()[0].tolist() - # -- properties - armatures = self.root_physx_view.get_dof_armatures()[0].tolist() - if get_isaac_sim_version().major < 5: - static_frictions = self.root_physx_view.get_dof_friction_coefficients()[0].tolist() - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - static_frictions = friction_props[:, :, 0][0].tolist() - dynamic_frictions = friction_props[:, :, 1][0].tolist() - viscous_frictions = friction_props[:, :, 2][0].tolist() - # -- limits - position_limits = self.root_physx_view.get_dof_limits()[0].tolist() - velocity_limits = self.root_physx_view.get_dof_max_velocities()[0].tolist() - effort_limits = self.root_physx_view.get_dof_max_forces()[0].tolist() - # create table for term information - joint_table = PrettyTable() - joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" - # build field names based on Isaac Sim version - field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] - if get_isaac_sim_version().major < 5: - field_names.append("Static Friction") - else: - field_names.extend(["Static Friction", "Dynamic Friction", "Viscous Friction"]) - field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) - joint_table.field_names = field_names - - # apply custom formatters to numeric columns - joint_table.custom_format["Stiffness"] = format_large_number - joint_table.custom_format["Damping"] = format_large_number - joint_table.custom_format["Armature"] = format_large_number - joint_table.custom_format["Static Friction"] = format_large_number - if get_isaac_sim_version().major >= 5: - joint_table.custom_format["Dynamic Friction"] = format_large_number - joint_table.custom_format["Viscous Friction"] = format_large_number - joint_table.custom_format["Position Limits"] = format_limits - joint_table.custom_format["Velocity Limits"] = format_large_number - joint_table.custom_format["Effort Limits"] = format_large_number - - # set alignment of table columns - joint_table.align["Name"] = "l" - # add info on each term - for index, name in enumerate(self.joint_names): - # build row data based on Isaac Sim version - row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] - if get_isaac_sim_version().major < 5: - row_data.append(static_frictions[index]) - else: - row_data.extend([static_frictions[index], dynamic_frictions[index], viscous_frictions[index]]) - row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) - # add row to table - joint_table.add_row(row_data) - # convert table to string - logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) - - # read out all fixed tendon parameters from simulation - if self.num_fixed_tendons > 0: - # -- gains - ft_stiffnesses = self.root_physx_view.get_fixed_tendon_stiffnesses()[0].tolist() - 
ft_dampings = self.root_physx_view.get_fixed_tendon_dampings()[0].tolist() - # -- limits - ft_limit_stiffnesses = self.root_physx_view.get_fixed_tendon_limit_stiffnesses()[0].tolist() - ft_limits = self.root_physx_view.get_fixed_tendon_limits()[0].tolist() - ft_rest_lengths = self.root_physx_view.get_fixed_tendon_rest_lengths()[0].tolist() - ft_offsets = self.root_physx_view.get_fixed_tendon_offsets()[0].tolist() - # create table for term information - tendon_table = PrettyTable() - tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" - tendon_table.field_names = [ - "Index", - "Stiffness", - "Damping", - "Limit Stiffness", - "Limits", - "Rest Length", - "Offset", - ] - tendon_table.float_format = ".3" - - # apply custom formatters to tendon table columns - tendon_table.custom_format["Stiffness"] = format_large_number - tendon_table.custom_format["Damping"] = format_large_number - tendon_table.custom_format["Limit Stiffness"] = format_large_number - tendon_table.custom_format["Limits"] = format_limits - tendon_table.custom_format["Rest Length"] = format_large_number - tendon_table.custom_format["Offset"] = format_large_number - - # add info on each term - for index in range(self.num_fixed_tendons): - tendon_table.add_row( - [ - index, - ft_stiffnesses[index], - ft_dampings[index], - ft_limit_stiffnesses[index], - ft_limits[index], - ft_rest_lengths[index], - ft_offsets[index], - ] - ) - # convert table to string - logger.info( - f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() - ) - - if self.num_spatial_tendons > 0: - # -- gains - st_stiffnesses = self.root_physx_view.get_spatial_tendon_stiffnesses()[0].tolist() - st_dampings = self.root_physx_view.get_spatial_tendon_dampings()[0].tolist() - # -- limits - st_limit_stiffnesses = self.root_physx_view.get_spatial_tendon_limit_stiffnesses()[0].tolist() - st_offsets = self.root_physx_view.get_spatial_tendon_offsets()[0].tolist() - # create table for term information - tendon_table = PrettyTable() - tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" - tendon_table.field_names = [ - "Index", - "Stiffness", - "Damping", - "Limit Stiffness", - "Offset", - ] - tendon_table.float_format = ".3" - # add info on each term - for index in range(self.num_spatial_tendons): - tendon_table.add_row( - [ - index, - st_stiffnesses[index], - st_dampings[index], - st_limit_stiffnesses[index], - st_offsets[index], - ] - ) - # convert table to string - logger.info( - f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() - ) - - """ - Deprecated methods. - """ - - def write_joint_friction_to_sim( - self, - joint_friction: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint friction coefficients into the simulation. - - .. deprecated:: 2.1.0 - Please use :meth:`write_joint_friction_coefficient_to_sim` instead. - """ - logger.warning( - "The function 'write_joint_friction_to_sim' will be deprecated in a future release. Please" - " use 'write_joint_friction_coefficient_to_sim' instead." 
- ) - self.write_joint_friction_coefficient_to_sim(joint_friction, joint_ids=joint_ids, env_ids=env_ids) + from isaaclab_physx.assets.articulation import Articulation as PhysXArticulation + from isaaclab_physx.assets.articulation import ArticulationData as PhysXArticulationData - def write_joint_limits_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - warn_limit_violation: bool = True, - ): - """Write joint limits into the simulation. - .. deprecated:: 2.1.0 - Please use :meth:`write_joint_position_limit_to_sim` instead. - """ - logger.warning( - "The function 'write_joint_limits_to_sim' will be deprecated in a future release. Please" - " use 'write_joint_position_limit_to_sim' instead." - ) - self.write_joint_position_limit_to_sim( - limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=warn_limit_violation - ) +class Articulation(FactoryBase, BaseArticulation): + """Factory for creating articulation instances.""" - def set_fixed_tendon_limit( - self, - limit: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon position limits into internal buffers. + data: BaseArticulationData | PhysXArticulationData - .. deprecated:: 2.1.0 - Please use :meth:`set_fixed_tendon_position_limit` instead. - """ - logger.warning( - "The function 'set_fixed_tendon_limit' will be deprecated in a future release. Please" - " use 'set_fixed_tendon_position_limit' instead." - ) - self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) + def __new__(cls, *args, **kwargs) -> BaseArticulation | PhysXArticulation: + """Create a new instance of an articulation based on the backend.""" + # The `FactoryBase` __new__ method will handle the logic and return + # an instance of the correct backend-specific articulation class, + # which is guaranteed to be a subclass of `BaseArticulation` by convention. + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/assets/articulation/articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/articulation_data.py index 47902edc0a5..5a45d0d1425 100644 --- a/source/isaaclab/isaaclab/assets/articulation/articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/articulation_data.py @@ -3,1163 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause -import logging -import weakref +from __future__ import annotations -import torch +from typing import TYPE_CHECKING -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager +from isaaclab.utils.backend_utils import FactoryBase -import isaaclab.utils.math as math_utils -from isaaclab.utils.buffers import TimestampedBuffer +from .base_articulation_data import BaseArticulationData -# import logger -logger = logging.getLogger(__name__) +if TYPE_CHECKING: + from isaaclab_physx.assets.articulation.articulation_data import ArticulationData as PhysXArticulationData -class ArticulationData: - """Data container for an articulation. +class ArticulationData(FactoryBase): + """Factory for creating articulation data instances.""" - This class contains the data for an articulation in the simulation. The data includes the state of - the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is - stored in the simulation world frame unless otherwise specified. 
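The `Articulation` and `ArticulationData` factories introduced above delegate instance creation to `FactoryBase`, which resolves the active backend and returns the matching implementation class (for example, the PhysX classes from `isaaclab_physx`). The resolution logic itself lives in `isaaclab/utils/backend_utils.py` and is not shown in this hunk; the self-contained sketch below only illustrates the dispatch idea under that assumption, and every name in it (`_ACTIVE_BACKEND`, `BackendFactory`, `BaseThing`, `PhysXThing`) is hypothetical.

.. code-block:: python

    # Hypothetical sketch of backend-based factory dispatch; names are illustrative only.
    _ACTIVE_BACKEND = "physx"  # assumed to come from the simulation/launcher configuration


    class BaseThing:
        """Backend-agnostic base implementation."""

        def __init__(self, cfg: dict | None = None):
            self.cfg = cfg


    class PhysXThing(BaseThing):
        """Backend-specific implementation selected when PhysX is active."""


    class BackendFactory:
        """Stand-in for FactoryBase: returns the implementation for the active backend."""

        _registry: dict[str, type] = {"base": BaseThing, "physx": PhysXThing}

        def __new__(cls, *args, **kwargs):
            impl = cls._registry.get(_ACTIVE_BACKEND, BaseThing)
            return impl(*args, **kwargs)


    class Thing(BackendFactory, BaseThing):
        """User-facing entry point, analogous to the Articulation factory above."""


    thing = Thing(cfg={"prim_path": "/World/Robot"})
    assert type(thing) is PhysXThing  # construction transparently yields the backend class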
- - An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames - of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame - can be interpreted as the link frame. - """ - - def __init__(self, root_physx_view: physx.ArticulationView, device: str): - """Initializes the articulation data. - - Args: - root_physx_view: The root articulation view. - device: The device used for processing. - """ - # Set the parameters - self.device = device - # Set the root articulation view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. - self._root_physx_view: physx.ArticulationView = weakref.proxy(root_physx_view) - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - gravity = self._physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) - - # Initialize history for finite differencing - self._previous_joint_vel = self._root_physx_view.get_dof_velocities().clone() - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._root_link_pose_w = TimestampedBuffer() - self._root_link_vel_w = TimestampedBuffer() - self._body_link_pose_w = TimestampedBuffer() - self._body_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._body_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._root_com_pose_w = TimestampedBuffer() - self._root_com_vel_w = TimestampedBuffer() - self._body_com_pose_w = TimestampedBuffer() - self._body_com_vel_w = TimestampedBuffer() - self._body_com_acc_w = TimestampedBuffer() - # -- combined state (these are cached as they concatenate) - self._root_state_w = TimestampedBuffer() - self._root_link_state_w = TimestampedBuffer() - self._root_com_state_w = TimestampedBuffer() - self._body_state_w = TimestampedBuffer() - self._body_link_state_w = TimestampedBuffer() - self._body_com_state_w = TimestampedBuffer() - # -- joint state - self._joint_pos = TimestampedBuffer() - self._joint_vel = TimestampedBuffer() - self._joint_acc = TimestampedBuffer() - self._body_incoming_joint_wrench_b = TimestampedBuffer() - - def update(self, dt: float): - # update the simulation timestamp - self._sim_timestamp += dt - # Trigger an update of the joint acceleration buffer at a higher frequency - # since we do finite differencing. - self.joint_acc - - ## - # Names. 
- ## - - body_names: list[str] = None - """Body names in the order parsed by the simulation view.""" - - joint_names: list[str] = None - """Joint names in the order parsed by the simulation view.""" - - fixed_tendon_names: list[str] = None - """Fixed tendon names in the order parsed by the simulation view.""" - - spatial_tendon_names: list[str] = None - """Spatial tendon names in the order parsed by the simulation view.""" - - ## - # Defaults - Initial state. - ## - - default_root_state: torch.Tensor = None - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. - Shape is (num_instances, 13). - - The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular - velocities are of its center of mass frame. - - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - default_joint_pos: torch.Tensor = None - """Default joint positions of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - default_joint_vel: torch.Tensor = None - """Default joint velocities of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - ## - # Defaults - Physical properties. - ## - - default_mass: torch.Tensor = None - """Default mass for all the bodies in the articulation. Shape is (num_instances, num_bodies). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_inertia: torch.Tensor = None - """Default inertia for all the bodies in the articulation. Shape is (num_instances, num_bodies, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the articulation links' - actor frame. The values are stored in the order - :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. However, due to the - symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_joint_stiffness: torch.Tensor = None - """Default joint stiffness of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.stiffness` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - .. attention:: - The default stiffness is the value configured by the user or the value parsed from the USD schema. - It should not be confused with :attr:`joint_stiffness`, which is the value set into the simulation. - """ - - default_joint_damping: torch.Tensor = None - """Default joint damping of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.damping` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - .. attention:: - The default stiffness is the value configured by the user or the value parsed from the USD schema. - It should not be confused with :attr:`joint_damping`, which is the value set into the simulation. - """ - - default_joint_armature: torch.Tensor = None - """Default joint armature of all joints. Shape is (num_instances, num_joints). 
- - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.armature` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - """ - - default_joint_friction_coeff: torch.Tensor = None - """Default joint static friction coefficient of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.friction` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - Note: - In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - default_joint_dynamic_friction_coeff: torch.Tensor = None - """Default joint dynamic friction coefficient of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's - :attr:`isaaclab.actuators.ActuatorBaseCfg.dynamic_friction` parameter. If the parameter's value is None, - the value parsed from the USD schema, at the time of initialization, is used. - - Note: - In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - default_joint_viscous_friction_coeff: torch.Tensor = None - """Default joint viscous friction coefficient of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's - :attr:`isaaclab.actuators.ActuatorBaseCfg.viscous_friction` parameter. If the parameter's value is None, - the value parsed from the USD schema, at the time of initialization, is used. - """ - - default_joint_pos_limits: torch.Tensor = None - """Default joint position limits of all joints. Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the - time of initialization. - """ - - default_fixed_tendon_stiffness: torch.Tensor = None - """Default tendon stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_damping: torch.Tensor = None - """Default tendon damping of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_limit_stiffness: torch.Tensor = None - """Default tendon limit stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_rest_length: torch.Tensor = None - """Default tendon rest length of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_offset: torch.Tensor = None - """Default tendon offset of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_pos_limits: torch.Tensor = None - """Default tendon position limits of all fixed tendons. Shape is (num_instances, num_fixed_tendons, 2). - - The position limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the time of - initialization. 
- """ - - default_spatial_tendon_stiffness: torch.Tensor = None - """Default tendon stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_damping: torch.Tensor = None - """Default tendon damping of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_limit_stiffness: torch.Tensor = None - """Default tendon limit stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_offset: torch.Tensor = None - """Default tendon offset of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Joint commands -- Set into simulation. - ## - - joint_pos_target: torch.Tensor = None - """Joint position targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - joint_vel_target: torch.Tensor = None - """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - joint_effort_target: torch.Tensor = None - """Joint effort targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - ## - # Joint commands -- Explicit actuators. - ## - - computed_torque: torch.Tensor = None - """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints). - - This quantity is the raw torque output from the actuator mode, before any clipping is applied. - It is exposed for users who want to inspect the computations inside the actuator model. - For instance, to penalize the learning agent for a difference between the computed and applied torques. - """ - - applied_torque: torch.Tensor = None - """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints). - - These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the - actuator model. - """ - - ## - # Joint properties. - ## - - joint_stiffness: torch.Tensor = None - """Joint stiffness provided to the simulation. Shape is (num_instances, num_joints). - - In the case of explicit actuators, the value for the corresponding joints is zero. - """ - - joint_damping: torch.Tensor = None - """Joint damping provided to the simulation. Shape is (num_instances, num_joints) - - In the case of explicit actuators, the value for the corresponding joints is zero. - """ - - joint_armature: torch.Tensor = None - """Joint armature provided to the simulation. 
Shape is (num_instances, num_joints).""" - - joint_friction_coeff: torch.Tensor = None - """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints). - - Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - joint_dynamic_friction_coeff: torch.Tensor = None - """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints). - - Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - joint_viscous_friction_coeff: torch.Tensor = None - """Joint viscous friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" - - joint_pos_limits: torch.Tensor = None - """Joint position limits provided to the simulation. Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`. - """ - - joint_vel_limits: torch.Tensor = None - """Joint maximum velocity provided to the simulation. Shape is (num_instances, num_joints).""" - - joint_effort_limits: torch.Tensor = None - """Joint maximum effort provided to the simulation. Shape is (num_instances, num_joints).""" - - ## - # Joint properties - Custom. - ## - - soft_joint_pos_limits: torch.Tensor = None - r"""Soft joint positions limits for all joints. Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as - a sub-region of the :attr:`joint_pos_limits` based on the - :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. - - Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits - :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: - - .. math:: - - soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 - soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 - - The soft joint position limits help specify a safety region around the joint limits. It isn't used by the - simulation, but is useful for learning agents to prevent the joint positions from violating the limits. - """ - - soft_joint_vel_limits: torch.Tensor = None - """Soft joint velocity limits for all joints. Shape is (num_instances, num_joints). - - These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model - has a variable velocity limit model. For instance, in a variable gear ratio actuator model. - """ - - gear_ratio: torch.Tensor = None - """Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints).""" - - ## - # Fixed tendon properties. - ## - - fixed_tendon_stiffness: torch.Tensor = None - """Fixed tendon stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_damping: torch.Tensor = None - """Fixed tendon damping provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_limit_stiffness: torch.Tensor = None - """Fixed tendon limit stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_rest_length: torch.Tensor = None - """Fixed tendon rest length provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_offset: torch.Tensor = None - """Fixed tendon offset provided to the simulation. 
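As a worked example of the soft joint position limit formula documented above: with hard limits of
``[-1.0, 1.0]`` rad and a factor of 0.9, the soft limits become ``[-0.9, 0.9]``. A minimal standalone sketch of
the same computation (tensor and variable names are illustrative, not part of the API):

.. code-block:: python

    import torch

    # hard limits with shape (num_instances, num_joints, 2) in the order [lower, upper]
    joint_pos_limits = torch.tensor([[[-1.0, 1.0]]])
    factor = 0.9  # soft_joint_pos_limit_factor from the articulation configuration

    midpoint = joint_pos_limits.mean(dim=-1)
    half_range = 0.5 * (joint_pos_limits[..., 1] - joint_pos_limits[..., 0])
    soft_lower = midpoint - factor * half_range  # -0.9
    soft_upper = midpoint + factor * half_range  #  0.9
    soft_joint_pos_limits = torch.stack((soft_lower, soft_upper), dim=-1)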
Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_pos_limits: torch.Tensor = None - """Fixed tendon position limits provided to the simulation. Shape is (num_instances, num_fixed_tendons, 2).""" - - ## - # Spatial tendon properties. - ## - - spatial_tendon_stiffness: torch.Tensor = None - """Spatial tendon stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_damping: torch.Tensor = None - """Spatial tendon damping provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_limit_stiffness: torch.Tensor = None - """Spatial tendon limit stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_offset: torch.Tensor = None - """Spatial tendon offset provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - ## - # Root state properties. - ## - - @property - def root_link_pose_w(self) -> torch.Tensor: - """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the articulation root's actor frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_root_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._root_link_pose_w.data = pose - self._root_link_pose_w.timestamp = self._sim_timestamp - - return self._root_link_pose_w.data - - @property - def root_link_vel_w(self) -> torch.Tensor: - """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the articulation root's actor frame - relative to the world. - """ - if self._root_link_vel_w.timestamp < self._sim_timestamp: - # read the CoM velocity - vel = self.root_com_vel_w.clone() - # adjust linear velocity to link from center of mass - vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 - ) - # set the buffer data and timestamp - self._root_link_vel_w.data = vel - self._root_link_vel_w.timestamp = self._sim_timestamp - - return self._root_link_vel_w.data - - @property - def root_com_pose_w(self) -> torch.Tensor: - """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the articulation root's center of mass frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] - ) - # set the buffer data and timestamp - self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._root_com_pose_w.timestamp = self._sim_timestamp - - return self._root_com_pose_w.data - - @property - def root_com_vel_w(self) -> torch.Tensor: - """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the articulation root's center of mass frame - relative to the world. 
- """ - if self._root_com_vel_w.timestamp < self._sim_timestamp: - self._root_com_vel_w.data = self._root_physx_view.get_root_velocities() - self._root_com_vel_w.timestamp = self._sim_timestamp - - return self._root_com_vel_w.data - - @property - def root_state_w(self): - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position and quaternion are of the articulation root's actor frame relative to the world. Meanwhile, - the linear and angular velocities are of the articulation root's center of mass frame. - """ - if self._root_state_w.timestamp < self._sim_timestamp: - self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) - self._root_state_w.timestamp = self._sim_timestamp - - return self._root_state_w.data - - @property - def root_link_state_w(self): - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the articulation root's actor frame relative to the - world. - """ - if self._root_link_state_w.timestamp < self._sim_timestamp: - self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) - self._root_link_state_w.timestamp = self._sim_timestamp - - return self._root_link_state_w.data - - @property - def root_com_state_w(self): - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame - relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the - orientation of the principle inertia. - """ - if self._root_com_state_w.timestamp < self._sim_timestamp: - self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) - self._root_com_state_w.timestamp = self._sim_timestamp - - return self._root_com_state_w.data - - ## - # Body state properties. - ## - - @property - def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_bodies, 7). - - This quantity is the pose of the articulation links' actor frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_link_pose_w.timestamp < self._sim_timestamp: - # perform forward kinematics (shouldn't cause overhead if it happened already) - self._physics_sim_view.update_articulations_kinematic() - # read data from simulation - poses = self._root_physx_view.get_link_transforms().clone() - poses[..., 3:7] = math_utils.convert_quat(poses[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_link_pose_w.data = poses - self._body_link_pose_w.timestamp = self._sim_timestamp - - return self._body_link_pose_w.data - - @property - def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 6). - - This quantity contains the linear and angular velocities of the articulation links' actor frame - relative to the world. 
- """ - if self._body_link_vel_w.timestamp < self._sim_timestamp: - # read data from simulation - velocities = self.body_com_vel_w.clone() - # adjust linear velocity to link from center of mass - velocities[..., :3] += torch.linalg.cross( - velocities[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 - ) - # set the buffer data and timestamp - self._body_link_vel_w.data = velocities - self._body_link_vel_w.timestamp = self._sim_timestamp - - return self._body_link_vel_w.data - - @property - def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_bodies, 7). - - This quantity is the pose of the center of mass frame of the articulation links relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b - ) - # set the buffer data and timestamp - self._body_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._body_com_pose_w.timestamp = self._sim_timestamp - - return self._body_com_pose_w.data - - @property - def body_com_vel_w(self) -> torch.Tensor: - """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 6). - - This quantity contains the linear and angular velocities of the articulation links' center of mass frame - relative to the world. - """ - if self._body_com_vel_w.timestamp < self._sim_timestamp: - self._body_com_vel_w.data = self._root_physx_view.get_link_velocities() - self._body_com_vel_w.timestamp = self._sim_timestamp - - return self._body_com_vel_w.data - - @property - def body_state_w(self): - """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular - velocities are of the articulation links's center of mass frame. - """ - if self._body_state_w.timestamp < self._sim_timestamp: - self._body_state_w.data = torch.cat((self.body_link_pose_w, self.body_com_vel_w), dim=-1) - self._body_state_w.timestamp = self._sim_timestamp - - return self._body_state_w.data - - @property - def body_link_state_w(self): - """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. - """ - if self._body_link_state_w.timestamp < self._sim_timestamp: - self._body_link_state_w.data = torch.cat((self.body_link_pose_w, self.body_link_vel_w), dim=-1) - self._body_link_state_w.timestamp = self._sim_timestamp - - return self._body_link_state_w.data - - @property - def body_com_state_w(self): - """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the - world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the - principle inertia. 
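The link-velocity properties above recover the link-frame velocity from the center-of-mass velocity using the
standard rigid-body relation ``v_link = v_com + w x (r_link - r_com)``, where the world-frame offset is the
rotated, negated body-frame center-of-mass position. A standalone sketch of that shift, mirroring the buffer
update above (function and variable names are illustrative):

.. code-block:: python

    import torch

    import isaaclab.utils.math as math_utils


    def link_vel_from_com_vel(com_vel_w: torch.Tensor, link_quat_w: torch.Tensor, com_pos_b: torch.Tensor):
        """Shift a [lin_vel, ang_vel] twist from the center of mass to the link origin."""
        link_vel_w = com_vel_w.clone()
        # v_link = v_com + w x R(q_link) * (-r_com_b)
        link_vel_w[..., :3] += torch.linalg.cross(
            com_vel_w[..., 3:], math_utils.quat_apply(link_quat_w, -com_pos_b), dim=-1
        )
        return link_vel_w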
- """ - if self._body_com_state_w.timestamp < self._sim_timestamp: - self._body_com_state_w.data = torch.cat((self.body_com_pose_w, self.body_com_vel_w), dim=-1) - self._body_com_state_w.timestamp = self._sim_timestamp - - return self._body_com_state_w.data - - @property - def body_com_acc_w(self): - """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. - Shape is (num_instances, num_bodies, 6). - - All values are relative to the world. - """ - if self._body_com_acc_w.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._body_com_acc_w.data = self._root_physx_view.get_link_accelerations() - self._body_com_acc_w.timestamp = self._sim_timestamp - - return self._body_com_acc_w.data - - @property - def body_com_pose_b(self) -> torch.Tensor: - """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). - - This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_com_pose_b.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_coms().to(self.device) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_com_pose_b.data = pose - self._body_com_pose_b.timestamp = self._sim_timestamp - - return self._body_com_pose_b.data - - @property - def body_incoming_joint_wrench_b(self) -> torch.Tensor: - """Joint reaction wrench applied from body parent to child body in parent body frame. - - Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the - world of an articulation. - - For more information on joint wrenches, please check the`PhysX documentation`_ and the underlying - `PhysX Tensor API`_. - - .. _`PhysX documentation`: https://nvidia-omniverse.github.io/PhysX/physx/5.5.1/docs/Articulations.html#link-incoming-joint-force - .. _`PhysX Tensor API`: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.get_link_incoming_joint_force - """ - - if self._body_incoming_joint_wrench_b.timestamp < self._sim_timestamp: - self._body_incoming_joint_wrench_b.data = self._root_physx_view.get_link_incoming_joint_force() - self._body_incoming_joint_wrench_b.time_stamp = self._sim_timestamp - return self._body_incoming_joint_wrench_b.data - - ## - # Joint state properties. - ## - - @property - def joint_pos(self): - """Joint positions of all joints. Shape is (num_instances, num_joints).""" - if self._joint_pos.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._joint_pos.data = self._root_physx_view.get_dof_positions() - self._joint_pos.timestamp = self._sim_timestamp - return self._joint_pos.data - - @property - def joint_vel(self): - """Joint velocities of all joints. Shape is (num_instances, num_joints).""" - if self._joint_vel.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._joint_vel.data = self._root_physx_view.get_dof_velocities() - self._joint_vel.timestamp = self._sim_timestamp - return self._joint_vel.data - - @property - def joint_acc(self): - """Joint acceleration of all joints. 
Shape is (num_instances, num_joints).""" - if self._joint_acc.timestamp < self._sim_timestamp: - # note: we use finite differencing to compute acceleration - time_elapsed = self._sim_timestamp - self._joint_acc.timestamp - self._joint_acc.data = (self.joint_vel - self._previous_joint_vel) / time_elapsed - self._joint_acc.timestamp = self._sim_timestamp - # update the previous joint velocity - self._previous_joint_vel[:] = self.joint_vel - return self._joint_acc.data - - ## - # Derived Properties. - ## - - @property - def projected_gravity_b(self): - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self): - """Yaw heading of the base frame (in radians). Shape is (num_instances,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. - """ - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[:, 1], forward_w[:, 0]) - - @property - def root_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the articulation root's actor frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) - - @property - def root_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the articulation root's actor frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) - - @property - def root_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the articulation root's center of mass frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) - - @property - def root_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the articulation root's center of mass frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def root_link_pos_w(self) -> torch.Tensor: - """Root link position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_pose_w[:, :3] - - @property - def root_link_quat_w(self) -> torch.Tensor: - """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body. - """ - return self.root_link_pose_w[:, 3:7] - - @property - def root_link_lin_vel_w(self) -> torch.Tensor: - """Root linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's actor frame relative to the world. 
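The ``joint_acc`` property above is obtained by backward finite differencing of the joint velocities between
buffer updates rather than by querying the solver. A minimal sketch of that update (names are illustrative):

.. code-block:: python

    import torch


    def update_joint_acc(joint_vel: torch.Tensor, prev_joint_vel: torch.Tensor, dt: float) -> torch.Tensor:
        """Backward finite difference of the joint velocities over the elapsed time."""
        joint_acc = (joint_vel - prev_joint_vel) / dt
        # cache the current velocities for the next update
        prev_joint_vel.copy_(joint_vel)
        return joint_acc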
- """ - return self.root_link_vel_w[:, :3] - - @property - def root_link_ang_vel_w(self) -> torch.Tensor: - """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_vel_w[:, 3:6] - - @property - def root_com_pos_w(self) -> torch.Tensor: - """Root center of mass position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, :3] - - @property - def root_com_quat_w(self) -> torch.Tensor: - """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, 3:7] - - @property - def root_com_lin_vel_w(self) -> torch.Tensor: - """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, :3] - - @property - def root_com_ang_vel_w(self) -> torch.Tensor: - """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, 3:6] - - @property - def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the position of the articulation bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., :3] - - @property - def body_link_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the articulation bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., 3:7] - - @property - def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear velocity of the articulation bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., :3] - - @property - def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular velocity of the articulation bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., 3:6] - - @property - def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the position of the articulation bodies' actor frame. - """ - return self.body_com_pose_w[..., :3] - - @property - def body_com_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. - Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the articulation bodies' actor frame. 
- """ - return self.body_com_pose_w[..., 3:7] - - @property - def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear velocity of the articulation bodies' center of mass frame. - """ - return self.body_com_vel_w[..., :3] - - @property - def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular velocity of the articulation bodies' center of mass frame. - """ - return self.body_com_vel_w[..., 3:6] - - @property - def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear acceleration of the articulation bodies' center of mass frame. - """ - return self.body_com_acc_w[..., :3] - - @property - def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular acceleration of the articulation bodies' center of mass frame. - """ - return self.body_com_acc_w[..., 3:6] - - @property - def body_com_pos_b(self) -> torch.Tensor: - """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, num_bodies, 3). - - This quantity is the center of mass location relative to its body'slink frame. - """ - return self.body_com_pose_b[..., :3] - - @property - def body_com_quat_b(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the principles axes of inertia relative to its body's link frame. - """ - return self.body_com_pose_b[..., 3:7] - - ## - # Backward compatibility. 
- ## - - @property - def root_pose_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pose_w`.""" - return self.root_link_pose_w - - @property - def root_pos_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pos_w`.""" - return self.root_link_pos_w - - @property - def root_quat_w(self) -> torch.Tensor: - """Same as :attr:`root_link_quat_w`.""" - return self.root_link_quat_w - - @property - def root_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_vel_w`.""" - return self.root_com_vel_w - - @property - def root_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_w`.""" - return self.root_com_lin_vel_w - - @property - def root_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_w`.""" - return self.root_com_ang_vel_w - - @property - def root_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_b`.""" - return self.root_com_lin_vel_b - - @property - def root_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_b`.""" - return self.root_com_ang_vel_b - - @property - def body_pose_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pose_w`.""" - return self.body_link_pose_w - - @property - def body_pos_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pos_w`.""" - return self.body_link_pos_w - - @property - def body_quat_w(self) -> torch.Tensor: - """Same as :attr:`body_link_quat_w`.""" - return self.body_link_quat_w - - @property - def body_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_vel_w`.""" - return self.body_com_vel_w - - @property - def body_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_vel_w`.""" - return self.body_com_lin_vel_w - - @property - def body_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_vel_w`.""" - return self.body_com_ang_vel_w - - @property - def body_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_acc_w`.""" - return self.body_com_acc_w - - @property - def body_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_acc_w`.""" - return self.body_com_lin_acc_w - - @property - def body_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_acc_w`.""" - return self.body_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`body_com_pos_b`.""" - return self.body_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`body_com_quat_b`.""" - return self.body_com_quat_b - - @property - def joint_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" - logger.warning( - "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead." - ) - return self.joint_pos_limits - - @property - def default_joint_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" - logger.warning( - "The `default_joint_limits` property will be deprecated in a future release. Please use" - " `default_joint_pos_limits` instead." - ) - return self.default_joint_pos_limits - - @property - def joint_velocity_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" - logger.warning( - "The `joint_velocity_limits` property will be deprecated in a future release. Please use" - " `joint_vel_limits` instead." - ) - return self.joint_vel_limits - - @property - def joint_friction(self) -> torch.Tensor: - """Deprecated property. 
Please use :attr:`joint_friction_coeff` instead.""" - logger.warning( - "The `joint_friction` property will be deprecated in a future release. Please use" - " `joint_friction_coeff` instead." - ) - return self.joint_friction_coeff - - @property - def default_joint_friction(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" - logger.warning( - "The `default_joint_friction` property will be deprecated in a future release. Please use" - " `default_joint_friction_coeff` instead." - ) - return self.default_joint_friction_coeff - - @property - def fixed_tendon_limit(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" - logger.warning( - "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `fixed_tendon_pos_limits` instead." - ) - return self.fixed_tendon_pos_limits - - @property - def default_fixed_tendon_limit(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" - logger.warning( - "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `default_fixed_tendon_pos_limits` instead." - ) - return self.default_fixed_tendon_pos_limits + def __new__(cls, *args, **kwargs) -> BaseArticulationData | PhysXArticulationData: + """Create a new instance of an articulation data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py new file mode 100644 index 00000000000..d62392b8248 --- /dev/null +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -0,0 +1,1147 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Flag for pyright to ignore type errors in this file. +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import torch +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import warp as wp + +from ..asset_base import AssetBase + +if TYPE_CHECKING: + from isaaclab.utils.wrench_composer import WrenchComposer + + from .articulation_cfg import ArticulationCfg + from .articulation_data import ArticulationData + + +class BaseArticulation(AssetBase): + """An articulation asset class. + + An articulation is a collection of rigid bodies connected by joints. The joints can be either + fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. + However, the articulation class has currently been tested with revolute and prismatic joints. + The class supports both floating-base and fixed-base articulations. The type of articulation + is determined based on the root joint of the articulation. If the root joint is fixed, then + the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base + system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. + + For an asset to be considered an articulation, the root prim of the asset must have the + `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using + the reduced coordinate formulation. 
On playing the simulation, the physics engine parses the + articulation root prim and creates the corresponding articulation in the physics engine. The + articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. + + The articulation class also provides the functionality to augment the simulation of an articulated + system with custom actuator models. These models can either be explicit or implicit, as detailed in + the :mod:`isaaclab.actuators` module. The actuator models are specified using the + :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the + corresponding actuator models, when the simulation is played. + + During the simulation step, the articulation class first applies the actuator models to compute + the joint commands based on the user-specified targets. These joint commands are then applied + into the simulation. The joint commands can be either position, velocity, or effort commands. + As an example, the following snippet shows how this can be used for position commands: + + .. code-block:: python + + # an example instance of the articulation class + my_articulation = Articulation(cfg) + + # set joint position targets + my_articulation.set_joint_position_target(position) + # propagate the actuator models and apply the computed commands into the simulation + my_articulation.write_data_to_sim() + + # step the simulation using the simulation context + sim_context.step() + + # update the articulation state, where dt is the simulation time step + my_articulation.update(dt) + + .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html + + """ + + cfg: ArticulationCfg + """Configuration instance for the articulations.""" + + __backend_name__: str = "base" + """The name of the backend for the articulation.""" + + actuators: dict + """Dictionary of actuator instances for the articulation. + + The keys are the actuator names and the values are the actuator instances. The actuator instances + are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` + attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. + """ + + def __init__(self, cfg: ArticulationCfg): + """Initialize the articulation. + + Args: + cfg: A configuration instance. 
+ """ + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> ArticulationData: + raise NotImplementedError() + + @property + @abstractmethod + def num_instances(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def is_fixed_base(self) -> bool: + """Whether the articulation is a fixed-base or floating-base system.""" + raise NotImplementedError() + + @property + @abstractmethod + def num_joints(self) -> int: + """Number of joints in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def num_fixed_tendons(self) -> int: + """Number of fixed tendons in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def num_spatial_tendons(self) -> int: + """Number of spatial tendons in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def num_bodies(self) -> int: + """Number of bodies in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def joint_names(self) -> list[str]: + """Ordered names of joints in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def fixed_tendon_names(self) -> list[str]: + """Ordered names of fixed tendons in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def spatial_tendon_names(self) -> list[str]: + """Ordered names of spatial tendons in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of bodies in articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_view(self): + """Root view for the asset. + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + raise NotImplementedError() + + @property + @abstractmethod + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer for the articulation.""" + raise NotImplementedError() + + @property + @abstractmethod + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer for the articulation.""" + raise NotImplementedError() + + """ + Operations. + """ + + @abstractmethod + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset the articulation. + + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_data_to_sim(self) -> None: + """Write external wrenches and joint commands to the simulation. + + If any explicit actuators are present, then the actuator models are used to compute the + joint commands. Otherwise, the joint commands are directly set into the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + raise NotImplementedError() + + @abstractmethod + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + raise NotImplementedError() + + """ + Operations - Finders. + """ + + @abstractmethod + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. 
+ + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + raise NotImplementedError() + + @abstractmethod + def find_joints( + self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. + joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + raise NotImplementedError() + + @abstractmethod + def find_fixed_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint + names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means + all joints in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + raise NotImplementedError() + + @abstractmethod + def find_spatial_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + raise NotImplementedError() + + """ + Operations - State Writers. + """ + + @abstractmethod + def write_root_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13) or (num_instances, 13). + env_ids: Environment indices. If None, then all indices are used. 
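The finder methods above resolve regular expressions against the parsed joint and body names and return the
matching indices together with the matched names. A short usage sketch (the asset variable and the name patterns
are illustrative and assume the articulation is already initialized):

.. code-block:: python

    # `robot` is assumed to be an initialized articulation whose joints/bodies follow these naming patterns
    hip_ids, hip_names = robot.find_joints("(left|right)_hip_.*")
    foot_ids, foot_names = robot.find_bodies(".*_foot", preserve_order=True)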
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_com_state_to_sim(
+        self,
+        root_state: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root center of mass state over selected environment indices into the simulation.
+
+        The root state comprises the cartesian position, quaternion orientation in (x, y, z, w), and linear
+        and angular velocity. All the quantities are in the simulation frame.
+
+        Args:
+            root_state: Root state in simulation frame. Shape is (len(env_ids), 13) or (num_instances, 13).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_link_state_to_sim(
+        self,
+        root_state: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root link state over selected environment indices into the simulation.
+
+        The root state comprises the cartesian position, quaternion orientation in (x, y, z, w), and linear
+        and angular velocity. All the quantities are in the simulation frame.
+
+        Args:
+            root_state: Root state in simulation frame. Shape is (len(env_ids), 13) or (num_instances, 13).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_pose_to_sim(
+        self,
+        root_pose: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root pose over selected environment indices into the simulation.
+
+        The root pose comprises the cartesian position and quaternion orientation in (x, y, z, w).
+
+        Args:
+            root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_link_pose_to_sim(
+        self,
+        root_pose: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root link pose over selected environment indices into the simulation.
+
+        The root pose comprises the cartesian position and quaternion orientation in (x, y, z, w).
+
+        Args:
+            root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_com_pose_to_sim(
+        self,
+        root_pose: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root center of mass pose over selected environment indices into the simulation.
+
+        The root pose comprises the cartesian position and quaternion orientation in (x, y, z, w).
+        The orientation is the orientation of the principal axes of inertia.
+
+        Args:
+            root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_velocity_to_sim(
+        self,
+        root_velocity: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_com_velocity_to_sim(
+        self,
+        root_velocity: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_root_link_velocity_to_sim(
+        self,
+        root_velocity: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set the root link velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the root's frame rather than the root's center of mass.
+
+        Args:
+            root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_joint_state_to_sim(
+        self,
+        position: torch.Tensor,
+        velocity: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | slice | None = None,
+    ) -> None:
+        """Write joint positions and velocities to the simulation.
+
+        Args:
+            position: Joint positions. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
+            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_joint_position_to_sim(
+        self,
+        position: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | slice | None = None,
+    ) -> None:
+        """Write joint positions to the simulation.
+
+        Args:
+            position: Joint positions. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
+            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_joint_velocity_to_sim(
+        self,
+        velocity: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | slice | None = None,
+    ) -> None:
+        """Write joint velocities to the simulation.
+
+        Args:
+            velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
+            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    """
+    Operations - Simulation Parameters Writers.
+ """ + + @abstractmethod + def write_joint_stiffness_to_sim( + self, + stiffness: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint stiffness into the simulation. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def write_joint_damping_to_sim( + self, + damping: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint damping into the simulation. + + Args: + damping: Joint damping. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the damping for. Defaults to None (all joints). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def write_joint_position_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint position limits into the simulation. + + Args: + limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2) or (num_instances, num_joints, 2). + joint_ids: The joint indices to set the limits for. Defaults to None (all joints). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + raise NotImplementedError() + + @abstractmethod + def write_joint_velocity_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint max velocity to the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + Args: + limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the max velocity for. Defaults to None (all joints). + env_ids: The environment indices to set the max velocity for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def write_joint_effort_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint effort limits into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + Args: + limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). 
+            env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_joint_armature_to_sim(
+        self,
+        armature: torch.Tensor | float,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Write joint armature into the simulation.
+
+        The armature is directly added to the corresponding joint-space inertia. It helps improve the
+        simulation stability by reducing the joint velocities.
+
+        Args:
+            armature: Joint armature. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            joint_ids: The joint indices to set the armature for. Defaults to None (all joints).
+            env_ids: The environment indices to set the armature for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_joint_friction_coefficient_to_sim(
+        self,
+        joint_friction_coeff: torch.Tensor | float,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        r"""Write joint static friction coefficients into the simulation.
+
+        The joint static friction is a unitless quantity. It relates the magnitude of the spatial force transmitted
+        from the parent body to the child body to the maximal static friction force that may be applied by the solver
+        to resist the joint motion.
+
+        Args:
+            joint_friction_coeff: Joint static friction coefficient. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints).
+            joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints).
+            env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    """
+    Operations - Setters.
+    """
+
+    @abstractmethod
+    def set_masses(
+        self,
+        masses: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set masses of all bodies in the simulation world frame.
+
+        Args:
+            masses: Masses of all bodies. Shape is (num_instances, num_bodies).
+            body_ids: The body indices to set the masses for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the masses for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def set_coms(
+        self,
+        coms: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set center of mass positions of all bodies in the simulation world frame.
+
+        Args:
+            coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3).
+            body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def set_inertias(
+        self,
+        inertias: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set inertias of all bodies in the simulation world frame.
+
+        Args:
+            inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3).
+            body_ids: The body indices to set the inertias for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the inertias for. Defaults to None (all environments).
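+
+        Example (illustrative sketch; assumes ``asset`` is an already-initialized articulation instance):
+
+        .. code-block:: python
+
+            # scale the current body inertias by 10% for all environments and bodies
+            inertias = asset.data.body_inertia.clone() * 1.1
+            asset.set_inertias(inertias)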
+ """ + raise NotImplementedError() + + @abstractmethod + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ) -> None: + """Set external force and torque to apply on the asset's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) + + .. caution:: + If the function is called consecutively with and with different values for ``is_global``, then the + all the external wrenches will be applied in the frame specified by the last call. + + .. code-block:: python + + # example of setting external wrench in the global frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[0], is_global=True) + # example of setting external wrench in the link frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[1], is_global=False) + # Both environments will have the external wrenches applied in the link frame + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). + positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). Defaults to None. + body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the articulations' bodies. + """ + raise NotImplementedError() + + @abstractmethod + def set_joint_position_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint position targets into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). 
+ """ + raise NotImplementedError() + + @abstractmethod + def set_joint_velocity_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint velocity targets into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_joint_effort_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint efforts into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + raise NotImplementedError() + + """ + Operations - Tendons. + """ + + @abstractmethod + def set_fixed_tendon_stiffness( + self, + stiffness: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_damping( + self, + damping: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon damping into internal buffers. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). 
+ """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit stiffness efforts into internal buffers. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_position_limit( + self, + limit: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit efforts into internal buffers. + + This function does not apply the tendon limit to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limit for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_rest_length( + self, + rest_length: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon rest length efforts into internal buffers. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the rest length for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_offset( + self, + offset: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon offset efforts into internal buffers. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the offset for. Defaults to None (all environments). 
+ """ + raise NotImplementedError() + + @abstractmethod + def write_fixed_tendon_properties_to_sim( + self, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write fixed tendon properties into the simulation. + + Args: + fixed_tendon_ids: The fixed tendon indices to set the limits for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_spatial_tendon_stiffness( + self, + stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` function. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_spatial_tendon_damping( + self, + damping: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon damping into internal buffers. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` function. + + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_spatial_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon limit stiffness into internal buffers. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` function. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_spatial_tendon_offset( + self, + offset: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon offset efforts into internal buffers. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. 
To apply the tendon offset, call the :meth:`write_spatial_tendon_properties_to_sim` function. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the offset for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def write_spatial_tendon_properties_to_sim( + self, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write spatial tendon properties into the simulation. + + Args: + spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the properties for. Defaults to None (all environments). + """ + raise NotImplementedError() + + """ + Internal helper. + """ + + @abstractmethod + def _initialize_impl(self) -> None: + raise NotImplementedError() + + @abstractmethod + def _create_buffers(self) -> None: + raise NotImplementedError() + + @abstractmethod + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + raise NotImplementedError() + + """ + Internal simulation callbacks. + """ + + @abstractmethod + def _invalidate_initialize_callback(self, event) -> None: + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) + + """ + Internal helpers -- Actuators. + """ + + @abstractmethod + def _process_actuators_cfg(self) -> None: + """Process and apply articulation joint properties.""" + raise NotImplementedError() + + @abstractmethod + def _process_tendons(self) -> None: + """Process fixed and spatial tendons.""" + raise NotImplementedError() + + @abstractmethod + def _apply_actuator_model(self) -> None: + """Processes joint commands for the articulation by forwarding them to the actuators. + + The actions are first processed using actuator models. Depending on the robot configuration, + the actuator models compute the joint level simulation commands and sets them into the PhysX buffers. + """ + raise NotImplementedError() + + """ + Internal helpers -- Debugging. + """ + + @abstractmethod + def _validate_cfg(self) -> None: + """Validate the configuration after processing. + + Note: + This function should be called only after the configuration has been processed and the buffers have been + created. Otherwise, some settings that are altered during processing may not be validated. + For instance, the actuator models may change the joint max velocity limits. + """ + raise NotImplementedError() + + @abstractmethod + def _log_articulation_info(self) -> None: + """Log information about the articulation. + + Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. + """ + raise NotImplementedError() + + """ + Deprecated methods. + """ + + @abstractmethod + def write_joint_friction_to_sim( + self, + joint_friction: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint friction coefficients into the simulation. + + .. deprecated:: 2.1.0 + Please use :meth:`write_joint_friction_coefficient_to_sim` instead. 
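+
+        Example of migrating to the replacement method (illustrative sketch):
+
+        .. code-block:: python
+
+            # deprecated call
+            asset.write_joint_friction_to_sim(0.02)
+            # preferred replacement
+            asset.write_joint_friction_coefficient_to_sim(0.02)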
+ """ + raise NotImplementedError() + + @abstractmethod + def write_joint_limits_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint limits into the simulation. + + .. deprecated:: 2.1.0 + Please use :meth:`write_joint_position_limit_to_sim` instead. + """ + raise NotImplementedError() + + @abstractmethod + def set_fixed_tendon_limit( + self, + limit: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon position limits into internal buffers. + + .. deprecated:: 2.1.0 + Please use :meth:`set_fixed_tendon_position_limit` instead. + """ + raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py new file mode 100644 index 00000000000..dfcc646a8e1 --- /dev/null +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -0,0 +1,962 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from abc import ABC, abstractmethod + +import torch + + +class BaseArticulationData(ABC): + """Data container for an articulation. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + """ + + def __init__(self, root_view, device: str): + """Initializes the articulation data. + + Args: + root_view: The root articulation view. + device: The device used for processing. + """ + # Set the parameters + self.device = device + + @abstractmethod + def update(self, dt: float) -> None: + raise NotImplementedError + + ## + # Names. + ## + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + ## + # Defaults - Initial state. + ## + + @property + @abstractmethod + def default_root_pose(self) -> torch.Tensor: + """Default root pose ``[pos, quat]`` in the local environment frame. Shape is (num_instances, 7). + + The position and quaternion are of the articulation root's actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def default_root_vel(self) -> torch.Tensor: + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. 
Shape is (num_instances, 6). + + The linear and angular velocities are of the articulation root's center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def default_root_state(self) -> torch.Tensor: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 13). + + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular + velocities are of its center of mass frame. + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + raise NotImplementedError + + @property + @abstractmethod + def default_joint_pos(self) -> torch.Tensor: + """Default joint positions of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + raise NotImplementedError + + @property + @abstractmethod + def default_joint_vel(self) -> torch.Tensor: + """Default joint velocities of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + raise NotImplementedError + + ## + # Joint commands -- Set into simulation. + ## + + @property + @abstractmethod + def joint_pos_target(self) -> torch.Tensor: + """Joint position targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + raise NotImplementedError + + @property + @abstractmethod + def joint_vel_target(self) -> torch.Tensor: + """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + raise NotImplementedError + + @property + @abstractmethod + def joint_effort_target(self) -> torch.Tensor: + """Joint effort targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + raise NotImplementedError + + ## + # Joint commands -- Explicit actuators. + ## + + @property + @abstractmethod + def computed_torque(self) -> torch.Tensor: + """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints). + + This quantity is the raw torque output from the actuator mode, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + raise NotImplementedError + + @property + @abstractmethod + def applied_torque(self) -> torch.Tensor: + """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. 
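+
+        Example (illustrative sketch; assumes ``asset`` is an already-initialized articulation instance):
+
+        .. code-block:: python
+
+            # per-environment penalty for torques lost to actuator clipping
+            torque_error = torch.norm(asset.data.computed_torque - asset.data.applied_torque, dim=-1)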
+ """ + raise NotImplementedError + + ## + # Joint properties. + ## + + @property + @abstractmethod + def joint_stiffness(self) -> torch.Tensor: + """Joint stiffness provided to the simulation. Shape is (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + raise NotImplementedError + + @property + @abstractmethod + def joint_damping(self) -> torch.Tensor: + """Joint damping provided to the simulation. Shape is (num_instances, num_joints) + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + raise NotImplementedError + + @property + @abstractmethod + def joint_armature(self) -> torch.Tensor: + """Joint armature provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_friction_coeff(self) -> torch.Tensor: + """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_dynamic_friction_coeff(self) -> torch.Tensor: + """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_viscous_friction_coeff(self) -> torch.Tensor: + """Joint viscous friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_pos_limits(self) -> torch.Tensor: + """Joint position limits provided to the simulation. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + raise NotImplementedError + + @property + @abstractmethod + def joint_vel_limits(self) -> torch.Tensor: + """Joint maximum velocity provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_effort_limits(self) -> torch.Tensor: + """Joint maximum effort provided to the simulation. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + ## + # Joint properties - Custom. + ## + + @property + @abstractmethod + def soft_joint_pos_limits(self) -> torch.Tensor: + r"""Soft joint positions limits for all joints. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + raise NotImplementedError + + @property + @abstractmethod + def soft_joint_vel_limits(self) -> torch.Tensor: + """Soft joint velocity limits for all joints. Shape is (num_instances, num_joints). + + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. 
For instance, in a variable gear ratio actuator model. + """ + raise NotImplementedError + + @property + @abstractmethod + def gear_ratio(self) -> torch.Tensor: + """Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + ## + # Fixed tendon properties. + ## + + @property + @abstractmethod + def fixed_tendon_stiffness(self) -> torch.Tensor: + """Fixed tendon stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_damping(self) -> torch.Tensor: + """Fixed tendon damping provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_limit_stiffness(self) -> torch.Tensor: + """Fixed tendon limit stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_rest_length(self) -> torch.Tensor: + """Fixed tendon rest length provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_offset(self) -> torch.Tensor: + """Fixed tendon offset provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_pos_limits(self) -> torch.Tensor: + """Fixed tendon position limits provided to the simulation. Shape is (num_instances, num_fixed_tendons, 2).""" + raise NotImplementedError + + ## + # Spatial tendon properties. + ## + + @property + @abstractmethod + def spatial_tendon_stiffness(self) -> torch.Tensor: + """Spatial tendon stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def spatial_tendon_damping(self) -> torch.Tensor: + """Spatial tendon damping provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def spatial_tendon_limit_stiffness(self) -> torch.Tensor: + """Spatial tendon limit stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + raise NotImplementedError + + @property + @abstractmethod + def spatial_tendon_offset(self) -> torch.Tensor: + """Spatial tendon offset provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + raise NotImplementedError + + ## + # Root state properties. + ## + + @property + @abstractmethod + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). 
+ + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position and quaternion are of the articulation root's actor frame relative to the world. Meanwhile, + the linear and angular velocities are of the articulation root's center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root's actor frame relative to the + world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_state_w(self) -> torch.Tensor: + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame + relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the + orientation of the principle inertia. + """ + raise NotImplementedError + + ## + # Body state properties. + ## + + @property + @abstractmethod + def body_mass(self) -> torch.Tensor: + """Body mass ``wp.float32`` in the world frame. Shape is (num_instances, num_bodies).""" + raise NotImplementedError + + @property + @abstractmethod + def body_inertia(self) -> torch.Tensor: + """Body inertia ``wp.mat33`` in the world frame. Shape is (num_instances, num_bodies, 3, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). 
+ + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular + velocities are of the articulation links's center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. + Shape is (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_incoming_joint_wrench_b(self) -> torch.Tensor: + """Joint reaction wrench applied from body parent to child body in parent body frame. + + Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the + world of an articulation. + + For more information on joint wrenches, please check the`PhysX documentation `__ + and the underlying `PhysX Tensor API `__ . + """ + raise NotImplementedError + + ## + # Joint state properties. + ## + + @property + @abstractmethod + def joint_pos(self) -> torch.Tensor: + """Joint positions of all joints. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_vel(self) -> torch.Tensor: + """Joint velocities of all joints. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + @property + @abstractmethod + def joint_acc(self) -> torch.Tensor: + """Joint acceleration of all joints. Shape is (num_instances, num_joints).""" + raise NotImplementedError + + ## + # Derived Properties. + ## + + @property + @abstractmethod + def projected_gravity_b(self) -> torch.Tensor: + """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def heading_w(self) -> torch.Tensor: + """Yaw heading of the base frame (in radians). 
Shape is (num_instances,). + + Note: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the articulation root's actor frame with respect to the + its actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the articulation root's actor frame with respect to the + its actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the articulation root's center of mass frame with respect to the + its actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the articulation root's center of mass frame with respect to the + its actor frame. + """ + raise NotImplementedError + + ## + # Sliced properties. + ## + + @property + @abstractmethod + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation (x, y, z, w) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (x, y, z, w) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). 
+ + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame relative to the world. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame. + Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear acceleration of the articulation bodies' center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). 
+ + This quantity is the angular acceleration of the articulation bodies' center of mass frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, num_bodies, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + raise NotImplementedError + + ## + # Backward compatibility. + ## + + @property + @abstractmethod + def root_pose_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pose_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_pos_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pos_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_quat_w(self) -> torch.Tensor: + """Same as :attr:`root_link_quat_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_lin_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_b`.""" + raise NotImplementedError + + @property + @abstractmethod + def root_ang_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_b`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_pose_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pose_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_pos_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pos_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_quat_w(self) -> torch.Tensor: + """Same as :attr:`body_link_quat_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_vel_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_acc_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_lin_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_acc_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def body_ang_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_acc_w`.""" + raise NotImplementedError + + @property + @abstractmethod + def com_pos_b(self) -> torch.Tensor: + """Same as :attr:`body_com_pos_b`.""" + raise NotImplementedError + + @property + @abstractmethod + def com_quat_b(self) -> torch.Tensor: + """Same as 
:attr:`body_com_quat_b`.""" + raise NotImplementedError + + @property + @abstractmethod + def joint_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" + raise NotImplementedError + + @property + @abstractmethod + def joint_velocity_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" + raise NotImplementedError + + @property + @abstractmethod + def joint_friction(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_friction_coeff` instead.""" + raise NotImplementedError + + @property + @abstractmethod + def fixed_tendon_limit(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/assets/rigid_object/__init__.py b/source/isaaclab/isaaclab/assets/rigid_object/__init__.py index 1598c060f1a..5bc38dcb171 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/__init__.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/__init__.py @@ -5,6 +5,16 @@ """Sub-module for rigid object assets.""" +from .base_rigid_object import BaseRigidObject +from .base_rigid_object_data import BaseRigidObjectData from .rigid_object import RigidObject from .rigid_object_cfg import RigidObjectCfg from .rigid_object_data import RigidObjectData + +__all__ = [ + "BaseRigidObject", + "BaseRigidObjectData", + "RigidObject", + "RigidObjectCfg", + "RigidObjectData", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py new file mode 100644 index 00000000000..7519e2f6c7e --- /dev/null +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py @@ -0,0 +1,447 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from isaaclab.utils.wrench_composer import WrenchComposer + +from ..asset_base import AssetBase + +if TYPE_CHECKING: + from .rigid_object_cfg import RigidObjectCfg + from .rigid_object_data import RigidObjectData + + +class BaseRigidObject(AssetBase): + """A rigid object asset class. + + Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects + such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. + + For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid body. On playing the + simulation, the physics engine will automatically register the rigid body and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + .. note:: + + For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view + class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), + we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes + in Isaac Sim perform additional USD-related operations which are slow and also not required. + + .. 
_`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "base" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> RigidObjectData: + raise NotImplementedError() + + @property + @abstractmethod + def num_instances(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single rigid body. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_view(self): + """Root view for the asset. + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + raise NotImplementedError() + + @property + @abstractmethod + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer for the rigid object.""" + raise NotImplementedError() + + @property + @abstractmethod + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer for the rigid object.""" + raise NotImplementedError() + + """ + Operations. + """ + + @abstractmethod + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset the rigid object. + + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + raise NotImplementedError() + + @abstractmethod + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + raise NotImplementedError() + + """ + Operations - Finders. + """ + + @abstractmethod + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies in the rigid body based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + raise NotImplementedError() + + """ + Operations - Write to simulation. + """ + + @abstractmethod + def write_root_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. 
Shape is (len(env_ids), 13) or (num_instances, 13). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root center of mass state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13) or (num_instances, 13). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root link state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13) or (num_instances, 13). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_link_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_com_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principle axes of inertia. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7) or (num_instances, 7). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the root's center of mass rather than the roots frame. 
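+
+        A minimal usage sketch (``asset`` is assumed to be an already-initialized rigid object instance):
+
+        .. code-block:: python
+
+            # zero the root center-of-mass velocity of the first two environments
+            zero_vel = torch.zeros(2, 6, device=asset.device)
+            asset.write_root_velocity_to_sim(zero_vel, env_ids=[0, 1])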
+ + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_com_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the root's center of mass rather than the roots frame. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_root_link_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the root's frame rather than the roots center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + env_ids: Environment indices. If None, then all indices are used. + """ + raise NotImplementedError() + + """ + Operations - Setters. + """ + + @abstractmethod + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set masses of all bodies. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set center of mass positions of all bodies. + + Args: + coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). + body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set inertias of all bodies. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + raise NotImplementedError() + + @abstractmethod + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ) -> None: + """Set external force and torque to apply on the asset's bodies in their local frame. 
+
+        For many applications, we want to keep the applied external force on rigid bodies constant over a period of
+        time (for instance, during the policy control). This function allows us to store the external force and torque
+        into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the
+        external wrench at (in the local link frame of the bodies).
+
+        .. caution::
+            If the function is called with empty forces and torques, then this function disables the application
+            of external wrench to the simulation.
+
+        .. code-block:: python
+
+            # example of disabling external wrench
+            asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3))
+
+        .. caution::
+            If the function is called consecutively with different values for ``is_global``, then all the
+            external wrenches will be applied in the frame specified by the last call.
+
+        .. code-block:: python
+
+            # example of setting external wrench in the global frame
+            asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[0], is_global=True)
+            # example of setting external wrench in the link frame
+            asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[1], is_global=False)
+            # Both environments will have the external wrenches applied in the link frame
+
+        .. note::
+            This function does not apply the external wrench to the simulation. It only fills the buffers with
+            the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function
+            right before the simulation step.
+
+        Args:
+            forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
+            torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
+            positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None.
+            body_ids: Body indices to apply external wrench to. Defaults to None (all bodies).
+            env_ids: Environment indices to apply external wrench to. Defaults to None (all instances).
+            is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False,
+                the external wrench is applied in the link frame of the bodies.
+        """
+        raise NotImplementedError()
+
+    """
+    Internal helper.
+    """
+
+    @abstractmethod
+    def _initialize_impl(self) -> None:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def _create_buffers(self) -> None:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def _process_cfg(self) -> None:
+        """Post processing of configuration parameters."""
+        raise NotImplementedError()
+
+    """
+    Internal simulation callbacks.
+    """
+
+    @abstractmethod
+    def _invalidate_initialize_callback(self, event) -> None:
+        """Invalidates the scene elements."""
+        super()._invalidate_initialize_callback(event)
diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py
new file mode 100644
index 00000000000..5aa7c26c065
--- /dev/null
+++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py
@@ -0,0 +1,640 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from abc import ABC, abstractmethod
+
+import torch
+
+
+class BaseRigidObjectData(ABC):
+    """Data container for a rigid object.
+ + This class contains the data for a rigid object in the simulation. The data includes the state of + the root rigid body and the state of all the bodies in the object. The data is stored in the simulation + world frame unless otherwise specified. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + def __init__(self, root_view, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body view. + device: The device used for processing. + """ + # Set the parameters + self.device = device + + @abstractmethod + def update(self, dt: float) -> None: + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + raise NotImplementedError() + + ## + # Names. + ## + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + ## + # Defaults. + ## + + @property + @abstractmethod + def default_root_pose(self) -> torch.Tensor: + """Default root pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, 7). + + The position and quaternion are of the rigid body's actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def default_root_vel(self) -> torch.Tensor: + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). + + The linear and angular velocities are of the rigid body's center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def default_root_state(self) -> torch.Tensor: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are + of the center of mass frame. + """ + raise NotImplementedError() + + ## + # Root state properties. + ## + + @property + @abstractmethod + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the actor frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). 
+ + This quantity is the pose of the center of mass frame of the root rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular + velocities are of the rigid body's center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the + world. The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_state_w(self) -> torch.Tensor: + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame + relative to the world. Center of mass frame is the orientation principle axes of inertia. + """ + raise NotImplementedError() + + ## + # Body state properties. + ## + + @property + @abstractmethod + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position and orientation are of the rigid bodies' actor frame. 
Meanwhile, the linear and angular + velocities are of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_mass(self) -> torch.Tensor: + """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + raise NotImplementedError() + + @property + @abstractmethod + def body_inertia(self) -> torch.Tensor: + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + raise NotImplementedError() + + ## + # Derived Properties. + ## + + @property + @abstractmethod + def projected_gravity_b(self) -> torch.Tensor: + """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" + raise NotImplementedError() + + @property + @abstractmethod + def heading_w(self) -> torch.Tensor: + """Yaw heading of the base frame (in radians). Shape is (num_instances,). + + Note: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. 
+ """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + raise NotImplementedError() + + ## + # Sliced properties. + ## + + @property + @abstractmethod + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation (x, y, z, w) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (x, y, z, w) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame relative to the world. 
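+
+        A minimal access sketch (``asset`` is assumed to be an already-initialized rigid object instance):
+
+        .. code-block:: python
+
+            # world-frame position of the single body, squeezed to shape (num_instances, 3)
+            pos_w = asset.data.body_link_pos_w[:, 0]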
+ """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, 1, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + raise NotImplementedError() + + ## + # Properties for backwards compatibility. 
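+    # A concrete backend is expected to implement each alias below by returning the
+    # corresponding new quantity, for example (hypothetical sketch):
+    #
+    #     @property
+    #     def root_pos_w(self) -> torch.Tensor:
+    #         return self.root_link_pos_w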
+ ## + + @property + @abstractmethod + def root_pose_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pose_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_pos_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pos_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_quat_w(self) -> torch.Tensor: + """Same as :attr:`root_link_quat_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_lin_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_b`.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_ang_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_b`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_pose_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pose_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_pos_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pos_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_quat_w(self) -> torch.Tensor: + """Same as :attr:`body_link_quat_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_vel_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_acc_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_lin_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_acc_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_ang_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_acc_w`.""" + raise NotImplementedError() + + @property + @abstractmethod + def com_pos_b(self) -> torch.Tensor: + """Same as :attr:`body_com_pos_b`.""" + raise NotImplementedError() + + @property + @abstractmethod + def com_quat_b(self) -> torch.Tensor: + """Same as :attr:`body_com_quat_b`.""" + + raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py index 6d0ec98f431..d1a8ecc11ce 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object.py @@ -5,578 +5,26 @@ from __future__ import annotations -import logging -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch -import warp as wp +from isaaclab.utils.backend_utils import FactoryBase -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import 
isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .rigid_object_data import RigidObjectData +from .base_rigid_object import BaseRigidObject +from .base_rigid_object_data import BaseRigidObjectData if TYPE_CHECKING: - from .rigid_object_cfg import RigidObjectCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RigidObject(AssetBase): - """A rigid object asset class. - - Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects - such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. - - For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ - applied to it. This API is used to define the simulation properties of the rigid body. On playing the - simulation, the physics engine will automatically register the rigid body and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. - - .. note:: - - For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view - class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), - we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes - in Isaac Sim perform additional USD-related operations which are slow and also not required. - - .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html - """ - - cfg: RigidObjectCfg - """Configuration instance for the rigid object.""" - - def __init__(self, cfg: RigidObjectCfg): - """Initialize the rigid object. - - Args: - cfg: A configuration instance. - """ - super().__init__(cfg) - - """ - Properties - """ - - @property - def data(self) -> RigidObjectData: - return self._data - - @property - def num_instances(self) -> int: - return self.root_physx_view.count - - @property - def num_bodies(self) -> int: - """Number of bodies in the asset. - - This is always 1 since each object is a single rigid body. - """ - return 1 - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies in the rigid object.""" - prim_paths = self.root_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def root_physx_view(self) -> physx.RigidBodyView: - """Rigid body view for the asset (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. - """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. 
This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # resolve all indices - if env_ids is None: - env_ids = slice(None) - # reset external wrench - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrench to the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_BODY_INDICES_WP, - env_ids=self._ALL_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. - """ - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the rigid body based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - """ - Operations - Write to simulation. - """ - - def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. 
- """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) - - def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_link_pose_w[env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. 
- if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_com_state_w.data is not None: - expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( - self._data.root_link_pose_w[env_ids, :3], - self._data.root_link_pose_w[env_ids, 3:7], - self.data.body_com_pos_b[env_ids, 0, :], - self.data.body_com_quat_b[env_ids, 0, :], - ) - self._data.root_com_state_w[env_ids, :3] = expected_com_pos - self._data.root_com_state_w[env_ids, 3:7] = expected_com_quat - # convert root quaternion from wxyz to xyzw - root_poses_xyzw = self._data.root_link_pose_w.clone() - root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") - # set into simulation - self.root_physx_view.set_transforms(root_poses_xyzw, indices=physx_env_ids) - - def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - The orientation is the orientation of the principle axes of inertia. - - Args: - root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_com_pose_w[local_env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids] - - # get CoM pose in link frame - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :] - # transform input CoM pose to link frame - root_link_pos, root_link_quat = math_utils.combine_frame_transforms( - root_pose[..., :3], - root_pose[..., 3:7], - math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), - math_utils.quat_inv(com_quat_b), - ) - root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1) - - # write transformed pose in link frame to sim - self.write_root_link_pose_to_sim(root_link_pose, env_ids=env_ids) - - def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids) - - def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. 
- NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_com_vel_w[env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - # make the acceleration zero to prevent reporting old values - self._data.body_com_acc_w[env_ids] = 0.0 - # set into simulation - self.root_physx_view.set_velocities(self._data.root_com_vel_w, indices=physx_env_ids) - - def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's frame rather than the roots center of mass. - - Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] - - # get CoM pose in link frame - quat = self.data.root_link_quat_w[local_env_ids] - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - # transform input velocity to center of mass frame - root_com_velocity = root_velocity.clone() - root_com_velocity[:, :3] += torch.linalg.cross( - root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write transformed velocity in CoM frame to sim - self.write_root_com_velocity_to_sim(root_com_velocity, env_ids=env_ids) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - body_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the asset's bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. 
Optionally, set the position to apply the - external wrench at (in the local link frame of the bodies). - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - Defaults to None. - body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the bodies. - """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - if forces is None and torques is None: - logger.warning("No forces or torques provided. No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- body_ids - body_ids = self._ALL_BODY_INDICES_WP - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=body_ids, - env_ids=env_ids, - is_global=is_global, - ) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # obtain the first prim in the regex expression (all others are assumed to be a copy of this) - template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if template_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") - template_prim_path = template_prim.GetPath().pathString - - # find rigid root prims - root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), - traverse_instance_prims=False, - ) - if len(root_prims) == 0: - raise RuntimeError( - f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." - " Please ensure that the prim has 'USD RigidBodyAPI' applied." - ) - if len(root_prims) > 1: - raise RuntimeError( - f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." - f" Found multiple '{root_prims}' under '{template_prim_path}'." 
- " Please ensure that there is only one rigid body in the prim path tree." - ) - - articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(articulation_prims) != 0: - if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): - raise RuntimeError( - f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" - f" located at: '{articulation_prims}' under '{template_prim_path}'. Please disable the articulation" - " root in the USD or from code by setting the parameter" - " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." - ) - - # resolve root prim back into regex expression - root_prim_path = root_prims[0].GetPath().pathString - root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] - # -- object view - self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*")) - - # check if the rigid body was created - if self._root_physx_view._backend is None: - raise RuntimeError(f"Failed to create rigid body at: {self.cfg.prim_path}. Please check PhysX logs.") - - # log information about the rigid body - logger.info(f"Rigid body initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") - logger.info(f"Number of instances: {self.num_instances}") - logger.info(f"Number of bodies: {self.num_bodies}") - logger.info(f"Body names: {self.body_names}") - - # container for data access - self._data = RigidObjectData(self.root_physx_view, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - # update the rigid body data - self.update(0.0) - - def _create_buffers(self): - """Create buffers for storing data.""" - # constants - self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_BODY_INDICES_WP = wp.from_torch( - torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 - ) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) + from isaaclab_physx.assets.rigid_object import RigidObject as PhysXRigidObject + from isaaclab_physx.assets.rigid_object import RigidObjectData as PhysXRigidObjectData - # set information about rigid body into data - self._data.body_names = self.body_names - self._data.default_mass = self.root_physx_view.get_masses().clone() - self._data.default_inertia = self.root_physx_view.get_inertias().clone() - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- root state - # note: we cast to tuple to avoid torch/numpy type mismatch. - default_root_state = ( - tuple(self.cfg.init_state.pos) - + tuple(self.cfg.init_state.rot) - + tuple(self.cfg.init_state.lin_vel) - + tuple(self.cfg.init_state.ang_vel) - ) - default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) - self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) +class RigidObject(FactoryBase, BaseRigidObject): + """Factory for creating rigid object instances.""" - """ - Internal simulation callbacks. 
- """ + data: BaseRigidObjectData | PhysXRigidObjectData - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._root_physx_view = None + def __new__(cls, *args, **kwargs) -> BaseRigidObject | PhysXRigidObject: + """Create a new instance of a rigid object based on the backend.""" + # The `FactoryBase` __new__ method will handle the logic and return + # an instance of the correct backend-specific rigid object class, + # which is guaranteed to be a subclass of `BaseRigidObject` by convention. + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py index d1a94f1eac7..a81e44892c0 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/rigid_object_data.py @@ -3,655 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause -import weakref +from __future__ import annotations -import torch +from typing import TYPE_CHECKING -import omni.physics.tensors.impl.api as physx +from isaaclab.utils.backend_utils import FactoryBase -import isaaclab.utils.math as math_utils -from isaaclab.sim.utils.stage import get_current_stage_id -from isaaclab.utils.buffers import TimestampedBuffer +from .base_rigid_object_data import BaseRigidObjectData +if TYPE_CHECKING: + from isaaclab_physx.assets.rigid_object.rigid_object_data import RigidObjectData as PhysXRigidObjectData -class RigidObjectData: - """Data container for a rigid object. - This class contains the data for a rigid object in the simulation. The data includes the state of - the root rigid body and the state of all the bodies in the object. The data is stored in the simulation - world frame unless otherwise specified. +class RigidObjectData(FactoryBase): + """Factory for creating rigid object data instances.""" - For a rigid body, there are two frames of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. - This needs to be taken into account when interpreting the data. - - The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful - when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer - is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. - """ - - def __init__(self, root_physx_view: physx.RigidBodyView, device: str): - """Initializes the rigid object data. - - Args: - root_physx_view: The root rigid body view. - device: The device used for processing. - """ - # Set the parameters - self.device = device - # Set the root rigid body view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. 
- self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._root_link_pose_w = TimestampedBuffer() - self._root_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._body_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._root_com_pose_w = TimestampedBuffer() - self._root_com_vel_w = TimestampedBuffer() - self._body_com_acc_w = TimestampedBuffer() - # -- combined state (these are cached as they concatenate) - self._root_state_w = TimestampedBuffer() - self._root_link_state_w = TimestampedBuffer() - self._root_com_state_w = TimestampedBuffer() - - def update(self, dt: float): - """Updates the data for the rigid object. - - Args: - dt: The time step for the update. This must be a positive value. - """ - # update the simulation timestamp - self._sim_timestamp += dt - - ## - # Names. - ## - - body_names: list[str] = None - """Body names in the order parsed by the simulation view.""" - - ## - # Defaults. - ## - - default_root_state: torch.Tensor = None - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). - - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. - """ - - default_mass: torch.Tensor = None - """Default mass read from the simulation. Shape is (num_instances, 1).""" - - default_inertia: torch.Tensor = None - """Default inertia tensor read from the simulation. Shape is (num_instances, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. - The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. - However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Root state properties. - ## - - @property - def root_link_pose_w(self) -> torch.Tensor: - """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the actor frame of the root rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. 
- """ - if self._root_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._root_link_pose_w.data = pose - self._root_link_pose_w.timestamp = self._sim_timestamp - - return self._root_link_pose_w.data - - @property - def root_link_vel_w(self) -> torch.Tensor: - """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the actor frame of the root - rigid body relative to the world. - """ - if self._root_link_vel_w.timestamp < self._sim_timestamp: - # read the CoM velocity - vel = self.root_com_vel_w.clone() - # adjust linear velocity to link from center of mass - vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 - ) - # set the buffer data and timestamp - self._root_link_vel_w.data = vel - self._root_link_vel_w.timestamp = self._sim_timestamp - - return self._root_link_vel_w.data - - @property - def root_com_pose_w(self) -> torch.Tensor: - """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the center of mass frame of the root rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] - ) - # set the buffer data and timestamp - self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._root_com_pose_w.timestamp = self._sim_timestamp - - return self._root_com_pose_w.data - - @property - def root_com_vel_w(self) -> torch.Tensor: - """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the root rigid body's center of mass frame - relative to the world. - """ - if self._root_com_vel_w.timestamp < self._sim_timestamp: - self._root_com_vel_w.data = self._root_physx_view.get_velocities() - self._root_com_vel_w.timestamp = self._sim_timestamp - - return self._root_com_vel_w.data - - @property - def root_state_w(self) -> torch.Tensor: - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular - velocities are of the rigid body's center of mass frame. - """ - if self._root_state_w.timestamp < self._sim_timestamp: - self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) - self._root_state_w.timestamp = self._sim_timestamp - - return self._root_state_w.data - - @property - def root_link_state_w(self) -> torch.Tensor: - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the - world. The orientation is provided in (w, x, y, z) format. 
- """ - if self._root_link_state_w.timestamp < self._sim_timestamp: - self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) - self._root_link_state_w.timestamp = self._sim_timestamp - - return self._root_link_state_w.data - - @property - def root_com_state_w(self) -> torch.Tensor: - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame - relative to the world. Center of mass frame is the orientation principle axes of inertia. - """ - if self._root_com_state_w.timestamp < self._sim_timestamp: - self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) - self._root_com_state_w.timestamp = self._sim_timestamp - - return self._root_com_state_w.data - - ## - # Body state properties. - ## - - @property - def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). - - This quantity is the pose of the actor frame of the rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_link_pose_w.view(-1, 1, 7) - - @property - def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). - - This quantity contains the linear and angular velocities of the actor frame of the root - rigid body relative to the world. - """ - return self.root_link_vel_w.view(-1, 1, 6) - - @property - def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). - - This quantity is the pose of the center of mass frame of the rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_com_pose_w.view(-1, 1, 7) - - @property - def body_com_vel_w(self) -> torch.Tensor: - """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 6). - - This quantity contains the linear and angular velocities of the root rigid body's center of mass frame - relative to the world. - """ - return self.root_com_vel_w.view(-1, 1, 6) - - @property - def body_state_w(self) -> torch.Tensor: - """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, 1, 13). - - The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular - velocities are of the rigid bodies' center of mass frame. - """ - return self.root_state_w.view(-1, 1, 13) - - @property - def body_link_state_w(self) -> torch.Tensor: - """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 13). - - The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_link_state_w.view(-1, 1, 13) - - @property - def body_com_state_w(self) -> torch.Tensor: - """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the - world. 
Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the - principle inertia. The orientation is provided in (w, x, y, z) format. - """ - return self.root_com_state_w.view(-1, 1, 13) - - @property - def body_com_acc_w(self) -> torch.Tensor: - """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. - Shape is (num_instances, 1, 6). - - This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. - """ - if self._body_com_acc_w.timestamp < self._sim_timestamp: - self._body_com_acc_w.data = self._root_physx_view.get_accelerations().unsqueeze(1) - self._body_com_acc_w.timestamp = self._sim_timestamp - - return self._body_com_acc_w.data - - @property - def body_com_pose_b(self) -> torch.Tensor: - """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). - - This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_com_pose_b.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_coms().to(self.device) - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_com_pose_b.data = pose.view(-1, 1, 7) - self._body_com_pose_b.timestamp = self._sim_timestamp - - return self._body_com_pose_b.data - - ## - # Derived Properties. - ## - - @property - def projected_gravity_b(self) -> torch.Tensor: - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self) -> torch.Tensor: - """Yaw heading of the base frame (in radians). Shape is (num_instances,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. - """ - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[:, 1], forward_w[:, 0]) - - @property - def root_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) - - @property - def root_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) - - @property - def root_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) - - @property - def root_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). 
- - This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def root_link_pos_w(self) -> torch.Tensor: - """Root link position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_pose_w[:, :3] - - @property - def root_link_quat_w(self) -> torch.Tensor: - """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body. - """ - return self.root_link_pose_w[:, 3:7] - - @property - def root_link_lin_vel_w(self) -> torch.Tensor: - """Root linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's actor frame relative to the world. - """ - return self.root_link_vel_w[:, :3] - - @property - def root_link_ang_vel_w(self) -> torch.Tensor: - """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_vel_w[:, 3:6] - - @property - def root_com_pos_w(self) -> torch.Tensor: - """Root center of mass position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, :3] - - @property - def root_com_quat_w(self) -> torch.Tensor: - """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, 3:7] - - @property - def root_com_lin_vel_w(self) -> torch.Tensor: - """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, :3] - - @property - def root_com_ang_vel_w(self) -> torch.Tensor: - """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, 3:6] - - @property - def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the position of the rigid bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., :3] - - @property - def body_link_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). - - This quantity is the orientation of the rigid bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., 3:7] - - @property - def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. 
- """ - return self.body_link_vel_w[..., :3] - - @property - def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., 3:6] - - @property - def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the position of the rigid bodies' actor frame. - """ - return self.body_com_pose_w[..., :3] - - @property - def body_com_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. - - Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. - """ - return self.body_com_pose_w[..., 3:7] - - @property - def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame. - """ - return self.body_com_vel_w[..., :3] - - @property - def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame. - """ - return self.body_com_vel_w[..., 3:6] - - @property - def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear acceleration of the rigid bodies' center of mass frame. - """ - return self.body_com_acc_w[..., :3] - - @property - def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular acceleration of the rigid bodies' center of mass frame. - """ - return self.body_com_acc_w[..., 3:6] - - @property - def body_com_pos_b(self) -> torch.Tensor: - """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, 1, 3). - - This quantity is the center of mass location relative to its body'slink frame. - """ - return self.body_com_pose_b[..., :3] - - @property - def body_com_quat_b(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, 1, 4). - - This quantity is the orientation of the principles axes of inertia relative to its body's link frame. - """ - return self.body_com_pose_b[..., 3:7] - - ## - # Properties for backwards compatibility. 
- ## - - @property - def root_pose_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pose_w`.""" - return self.root_link_pose_w - - @property - def root_pos_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pos_w`.""" - return self.root_link_pos_w - - @property - def root_quat_w(self) -> torch.Tensor: - """Same as :attr:`root_link_quat_w`.""" - return self.root_link_quat_w - - @property - def root_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_vel_w`.""" - return self.root_com_vel_w - - @property - def root_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_w`.""" - return self.root_com_lin_vel_w - - @property - def root_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_w`.""" - return self.root_com_ang_vel_w - - @property - def root_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_b`.""" - return self.root_com_lin_vel_b - - @property - def root_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_b`.""" - return self.root_com_ang_vel_b - - @property - def body_pose_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pose_w`.""" - return self.body_link_pose_w - - @property - def body_pos_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pos_w`.""" - return self.body_link_pos_w - - @property - def body_quat_w(self) -> torch.Tensor: - """Same as :attr:`body_link_quat_w`.""" - return self.body_link_quat_w - - @property - def body_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_vel_w`.""" - return self.body_com_vel_w - - @property - def body_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_vel_w`.""" - return self.body_com_lin_vel_w - - @property - def body_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_vel_w`.""" - return self.body_com_ang_vel_w - - @property - def body_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_acc_w`.""" - return self.body_com_acc_w - - @property - def body_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_acc_w`.""" - return self.body_com_lin_acc_w - - @property - def body_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_acc_w`.""" - return self.body_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`body_com_pos_b`.""" - return self.body_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`body_com_quat_b`.""" - return self.body_com_quat_b + def __new__(cls, *args, **kwargs) -> BaseRigidObjectData | PhysXRigidObjectData: + """Create a new instance of a rigid object data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py index edca995d62a..53eff6d98fc 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py @@ -5,6 +5,16 @@ """Sub-module for rigid object collection.""" +from .base_rigid_object_collection import BaseRigidObjectCollection +from .base_rigid_object_collection_data import BaseRigidObjectCollectionData from .rigid_object_collection import RigidObjectCollection from .rigid_object_collection_cfg import RigidObjectCollectionCfg from .rigid_object_collection_data import RigidObjectCollectionData + +__all__ = [ + "BaseRigidObjectCollection", + "BaseRigidObjectCollectionData", + "RigidObjectCollection", + "RigidObjectCollectionCfg", + 
"RigidObjectCollectionData", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py new file mode 100644 index 00000000000..bf444bacdf2 --- /dev/null +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -0,0 +1,464 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from isaaclab.utils.wrench_composer import WrenchComposer + +from ..asset_base import AssetBase + +if TYPE_CHECKING: + from .rigid_object_collection_cfg import RigidObjectCollectionCfg + from .rigid_object_collection_data import RigidObjectCollectionData + + +class BaseRigidObjectCollection(AssetBase): + """A rigid object collection class. + + This class represents a collection of rigid objects in the simulation, where the state of the + rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. + + For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the + simulation, the physics engine will automatically register the rigid bodies and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + Rigid objects in the collection are uniquely identified via the key of the dictionary + :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the + :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. + This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by + the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid + object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary + could contain the same rigid object multiple times, leading to ambiguity. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCollectionCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "base" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCollectionCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> RigidObjectCollectionData: + raise NotImplementedError() + + @property + @abstractmethod + def num_instances(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def num_bodies(self) -> int: + """Number of bodies in the rigid object collection.""" + raise NotImplementedError() + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object collection.""" + raise NotImplementedError() + + @property + @abstractmethod + def root_view(self): + """Root view for the rigid object collection. + + Note: + Use this view with caution. It requires handling of tensors in a specific way. 
+ """ + raise NotImplementedError() + + @property + @abstractmethod + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer for the rigid object collection.""" + raise NotImplementedError() + + @property + @abstractmethod + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer for the rigid object collection.""" + raise NotImplementedError() + + """ + Operations. + """ + + @abstractmethod + def reset(self, env_ids: Sequence[int] | None = None, object_ids: slice | torch.Tensor | None = None) -> None: + """Resets all internal buffers of selected environments and objects. + + Args: + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + raise NotImplementedError() + + @abstractmethod + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + raise NotImplementedError() + + """ + Operations - Finders. + """ + + @abstractmethod + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str], list[int]]: + """Find bodies in the rigid body collection based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body mask, names and indices. + """ + raise NotImplementedError() + + """ + Operations - Write to simulation. + """ + + @abstractmethod + def write_body_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the bodies state over selected environment indices into the simulation. + + The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. Shape is (len(env_ids), len(body_ids), 13). + + Args: + body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + raise NotImplementedError() + + @abstractmethod + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body center of mass state over selected environment and body indices into the simulation. + + The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. 
If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_link_state_to_sim(
+        self,
+        body_states: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body link state over selected environment and body indices into the simulation.
+
+        The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear
+        and angular velocity. All the quantities are in the simulation frame.
+
+        Args:
+            body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_pose_to_sim(
+        self,
+        body_poses: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body poses over selected environment and body indices into the simulation.
+
+        The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w).
+
+        Args:
+            body_poses: Body poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_link_pose_to_sim(
+        self,
+        body_poses: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body link pose over selected environment and body indices into the simulation.
+
+        The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w).
+
+        Args:
+            body_poses: Body link poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_com_pose_to_sim(
+        self,
+        body_poses: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body center of mass pose over selected environment and body indices into the simulation.
+
+        The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w).
+        The orientation is the orientation of the principle axes of inertia.
+
+        Args:
+            body_poses: Body center of mass poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_velocity_to_sim(
+        self,
+        body_velocities: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body velocity over selected environment and body indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the body's center of mass rather than the body's frame.
+
+        Args:
+            body_velocities: Body velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_com_velocity_to_sim(
+        self,
+        body_velocities: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body center of mass velocity over selected environment and body indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the body's center of mass rather than the body's frame.
+
+        Args:
+            body_velocities: Body center of mass velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def write_body_link_velocity_to_sim(
+        self,
+        body_velocities: torch.Tensor,
+        env_ids: Sequence[int] | None = None,
+        body_ids: slice | torch.Tensor | None = None,
+    ) -> None:
+        """Set the body link velocity over selected environment and body indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note:: This sets the velocity of the body's frame rather than the body's center of mass.
+
+        Args:
+            body_velocities: Body link velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+            body_ids: Body indices. If None, then all indices are used.
+        """
+        raise NotImplementedError()
+
+    """
+    Operations - Setters.
+    """
+
+    @abstractmethod
+    def set_masses(
+        self,
+        masses: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set masses of all bodies.
+
+        Args:
+            masses: Masses of all bodies. Shape is (num_instances, num_bodies).
+            body_ids: The body indices to set the masses for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the masses for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def set_coms(
+        self,
+        coms: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set center of mass positions of all bodies.
+
+        Args:
+            coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3).
+            body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments).
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def set_inertias(
+        self,
+        inertias: torch.Tensor,
+        body_ids: Sequence[int] | None = None,
+        env_ids: Sequence[int] | None = None,
+    ) -> None:
+        """Set inertias of all bodies.
+
+        Args:
+            inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3).
+            body_ids: The body indices to set the inertias for. Defaults to None (all bodies).
+            env_ids: The environment indices to set the inertias for. Defaults to None (all environments).
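+
+        For illustration, a minimal usage sketch is given below. The names ``collection``, ``num_envs`` and
+        ``num_bodies`` are assumed to refer to an already-initialized rigid object collection and its
+        dimensions; they are not defined by this class.
+
+        .. code-block:: python
+
+            # apply a uniform diagonal inertia to every body of every environment
+            inertias = 0.01 * torch.eye(3, device=collection.device).repeat(num_envs, num_bodies, 1, 1)
+            collection.set_inertias(inertias)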
+        """
+        raise NotImplementedError()
+
+    @abstractmethod
+    def set_external_force_and_torque(
+        self,
+        forces: torch.Tensor,
+        torques: torch.Tensor,
+        positions: torch.Tensor | None = None,
+        body_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+        is_global: bool = False,
+    ) -> None:
+        """Set external force and torque to apply on the rigid object collection's bodies in their local frame.
+
+        For many applications, we want to keep the applied external force on rigid bodies constant over a period of
+        time (for instance, during the policy control). This function allows us to store the external force and torque
+        into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the
+        external wrench at (in the local link frame of the bodies).
+
+        .. caution::
+            If the function is called with empty forces and torques, then this function disables the application
+            of external wrench to the simulation.
+
+            .. code-block:: python
+
+                # example of disabling external wrench
+                asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3))
+
+        .. caution::
+            If the function is called consecutively with different values for ``is_global``, then all the
+            external wrenches will be applied in the frame specified by the last call.
+
+            .. code-block:: python
+
+                # example of setting external wrench in the global frame
+                asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[0], is_global=True)
+                # example of setting external wrench in the link frame
+                asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[1], is_global=False)
+                # Both environments will have the external wrenches applied in the link frame
+
+        .. note::
+            This function does not apply the external wrench to the simulation. It only fills the buffers with
+            the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function
+            right before the simulation step.
+
+        Args:
+            forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
+            torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3).
+            positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None.
+            body_ids: Body indices to apply external wrench to. Defaults to None (all bodies).
+            env_ids: Environment indices to apply external wrench to. Defaults to None (all instances).
+            is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False,
+                the external wrench is applied in the link frame of the bodies.
+        """
+        raise NotImplementedError()
+
+    """
+    Internal helper.
+    """
+
+    @abstractmethod
+    def _initialize_impl(self) -> None:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def _create_buffers(self) -> None:
+        raise NotImplementedError()
+
+    @abstractmethod
+    def _process_cfg(self) -> None:
+        """Post processing of configuration parameters."""
+        raise NotImplementedError()
+
+    """
+    Internal simulation callbacks.
+ """ + + @abstractmethod + def _invalidate_initialize_callback(self, event) -> None: + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py new file mode 100644 index 00000000000..a1464ebf17d --- /dev/null +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -0,0 +1,376 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from abc import ABC, abstractmethod + +import torch + + +class BaseRigidObjectCollectionData(ABC): + """Data container for a rigid object collection. + + This class contains the data for a rigid object collection in the simulation. The data includes the state of + all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. + The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + def __init__(self, root_view, num_objects: int, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body collection view. + num_objects: The number of objects in the collection. + device: The device used for processing. + """ + # Set the parameters + self.device = device + + @abstractmethod + def update(self, dt: float) -> None: + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + raise NotImplementedError() + + ## + # Names. + ## + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + ## + # Defaults. + ## + + @property + @abstractmethod + def default_root_pose(self) -> torch.Tensor: + """Default root pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, 7). + + The position and quaternion are of the rigid body's actor frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def default_root_vel(self) -> torch.Tensor: + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). + + The linear and angular velocities are of the rigid body's center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def default_root_state(self) -> torch.Tensor: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). 
+ + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are + of the center of mass frame. + """ + raise NotImplementedError() + + ## + # Body state properties. + ## + + @property + @abstractmethod + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular + velocities are of the rigid bodies' center of mass frame. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. The orientation is provided in (x, y, z, w) format. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + raise NotImplementedError() + + @property + @abstractmethod + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). 
+
+        This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame.
+        The orientation is provided in (x, y, z, w) format.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_mass(self) -> torch.Tensor:
+        """Mass of all bodies. Shape is (num_instances, 1, 1)."""
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_inertia(self) -> torch.Tensor:
+        """Inertia tensor of all bodies. Shape is (num_instances, 1, 3, 3)."""
+        raise NotImplementedError()
+
+    ##
+    # Derived Properties.
+    ##
+
+    @property
+    @abstractmethod
+    def projected_gravity_b(self) -> torch.Tensor:
+        """Projection of the gravity direction on base frame. Shape is (num_instances, 3)."""
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def heading_w(self) -> torch.Tensor:
+        """Yaw heading of the base frame (in radians). Shape is (num_instances,).
+
+        Note:
+            This quantity is computed by assuming that the forward-direction of the base
+            frame is along x-direction, i.e. :math:`(1, 0, 0)`.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_link_lin_vel_b(self) -> torch.Tensor:
+        """Root link linear velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the
+        rigid body's actor frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_link_ang_vel_b(self) -> torch.Tensor:
+        """Root link angular velocity in base world frame. Shape is (num_instances, 3).
+
+        This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the
+        rigid body's actor frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_lin_vel_b(self) -> torch.Tensor:
+        """Root center of mass linear velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the
+        rigid body's actor frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_ang_vel_b(self) -> torch.Tensor:
+        """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3).
+
+        This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the
+        rigid body's actor frame.
+        """
+        raise NotImplementedError()
+
+    ##
+    # Sliced properties.
+    ##
+
+    @property
+    @abstractmethod
+    def body_link_pos_w(self) -> torch.Tensor:
+        """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the position of the rigid bodies' actor frame relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_link_quat_w(self) -> torch.Tensor:
+        """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, 1, 4).
+
+        This quantity is the orientation of the rigid bodies' actor frame relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_link_lin_vel_w(self) -> torch.Tensor:
+        """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the linear velocity of the rigid bodies' actor frame relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_link_ang_vel_w(self) -> torch.Tensor:
+        """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the angular velocity of the rigid bodies' actor frame relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_pos_w(self) -> torch.Tensor:
+        """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the position of the rigid bodies' center of mass frame relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_quat_w(self) -> torch.Tensor:
+        """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame.
+
+        Shape is (num_instances, 1, 4). This quantity is the orientation of the principle axes of inertia of the
+        rigid bodies relative to the world.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_lin_vel_w(self) -> torch.Tensor:
+        """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the linear velocity of the rigid bodies' center of mass frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_ang_vel_w(self) -> torch.Tensor:
+        """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the angular velocity of the rigid bodies' center of mass frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_lin_acc_w(self) -> torch.Tensor:
+        """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the linear acceleration of the rigid bodies' center of mass frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_ang_acc_w(self) -> torch.Tensor:
+        """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3).
+
+        This quantity is the angular acceleration of the rigid bodies' center of mass frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_pos_b(self) -> torch.Tensor:
+        """Center of mass position of all of the bodies in their respective link frames.
+        Shape is (num_instances, 1, 3).
+
+        This quantity is the center of mass location relative to its body's link frame.
+        """
+        raise NotImplementedError()
+
+    @property
+    @abstractmethod
+    def body_com_quat_b(self) -> torch.Tensor:
+        """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their
+        respective link frames. Shape is (num_instances, 1, 4).
+
+        This quantity is the orientation of the principle axes of inertia relative to its body's link frame.
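+
+        For illustration, this orientation can be used to express a principal-axes (diagonal) inertia tensor in
+        the link frame. The sketch below is not part of this class: ``data`` is assumed to be an initialized data
+        instance and ``principal_inertia`` a tensor of shape (num_instances, 1, 3, 3); the math utilities expect
+        quaternions in (w, x, y, z) order, hence the conversion.
+
+        .. code-block:: python
+
+            import isaaclab.utils.math as math_utils
+
+            # build rotation matrices from the principal-axes orientation
+            quat_wxyz = math_utils.convert_quat(data.body_com_quat_b, to="wxyz")
+            rot = math_utils.matrix_from_quat(quat_wxyz)  # (num_instances, 1, 3, 3)
+            # rotate the diagonal inertia into the link frame: I_link = R @ I_principal @ R^T
+            inertia_link = rot @ principal_inertia @ rot.transpose(-1, -2)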
+ """ + raise NotImplementedError() \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py index b458b15b403..ce8df64a64a 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py @@ -5,779 +5,26 @@ from __future__ import annotations -import logging -import re -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch -import warp as wp +from isaaclab.utils.backend_utils import FactoryBase -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .rigid_object_collection_data import RigidObjectCollectionData +from .base_rigid_object_collection import BaseRigidObjectCollection +from .base_rigid_object_collection_data import BaseRigidObjectCollectionData if TYPE_CHECKING: - from .rigid_object_collection_cfg import RigidObjectCollectionCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RigidObjectCollection(AssetBase): - """A rigid object collection class. - - This class represents a collection of rigid objects in the simulation, where the state of the - rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. - - For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ - applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the - simulation, the physics engine will automatically register the rigid bodies and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. - - Rigid objects in the collection are uniquely identified via the key of the dictionary - :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the - :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. - This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by - the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid - object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary - could contain the same rigid object multiple times, leading to ambiguity. - - .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html - """ - - cfg: RigidObjectCollectionCfg - """Configuration instance for the rigid object collection.""" - - def __init__(self, cfg: RigidObjectCollectionCfg): - """Initialize the rigid object collection. - - Args: - cfg: A configuration instance. - """ - # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. 
- # check that the config is valid - cfg.validate() - # store inputs - self.cfg = cfg.copy() - # flag for whether the asset is initialized - self._is_initialized = False - self._prim_paths = [] - # spawn the rigid objects - for rigid_object_cfg in self.cfg.rigid_objects.values(): - # check if the rigid object path is valid - # note: currently the spawner does not work if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Object_[1,2]" since the spawner will not - # know which prim to spawn. This is a limitation of the spawner and not the asset. - asset_path = rigid_object_cfg.prim_path.split("/")[-1] - asset_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", asset_path) is None - # spawn the asset - if rigid_object_cfg.spawn is not None and not asset_path_is_regex: - rigid_object_cfg.spawn.func( - rigid_object_cfg.prim_path, - rigid_object_cfg.spawn, - translation=rigid_object_cfg.init_state.pos, - orientation=rigid_object_cfg.init_state.rot, - ) - # check that spawn was successful - matching_prims = sim_utils.find_matching_prims(rigid_object_cfg.prim_path) - if len(matching_prims) == 0: - raise RuntimeError(f"Could not find prim with path {rigid_object_cfg.prim_path}.") - self._prim_paths.append(rigid_object_cfg.prim_path) - # stores object names - self._object_names_list = [] - - # register various callback functions - self._register_callbacks() - self._debug_vis_handle = None - - """ - Properties - """ - - @property - def data(self) -> RigidObjectCollectionData: - return self._data - - @property - def num_instances(self) -> int: - """Number of instances of the collection.""" - return self.root_physx_view.count // self.num_objects - - @property - def num_objects(self) -> int: - """Number of objects in the collection. - - This corresponds to the distinct number of rigid bodies in the collection. - """ - return len(self.object_names) - - @property - def object_names(self) -> list[str]: - """Ordered names of objects in the rigid object collection.""" - return self._object_names_list - - @property - def root_physx_view(self) -> physx.RigidBodyView: - """Rigid body view for the rigid body collection (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view # type: ignore - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. - """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.Tensor | None = None): - """Resets all internal buffers of selected environments and objects. 
- - Args: - env_ids: The indices of the object to reset. Defaults to None (all instances). - object_ids: The indices of the object to reset. Defaults to None (all objects). - """ - # resolve all indices - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - # reset external wrench - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrench to the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_OBJ_INDICES_WP, - env_ids=self._ALL_ENV_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_force_as_torch), - torque_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_torque_as_torch), - position_data=None, - indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_force_as_torch), - torque_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_torque_as_torch), - position_data=None, - indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. - """ - - def find_objects( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[torch.Tensor, list[str]]: - """Find objects in the collection based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the object names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple containing the object indices and names. - """ - obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.object_names, preserve_order) - return torch.tensor(obj_ids, device=self.device), obj_names - - """ - Operations - Write to simulation. - """ - - def write_object_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object state over selected environment and object indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. 
- - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_com_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass state over selected environment indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_com_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_link_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object link state over selected environment indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_link_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object pose over selected environment and object indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_pose, env_ids=env_ids, object_ids=object_ids) - - def write_object_link_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object pose over selected environment and object indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. 
- """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.object_link_pose_w[env_ids[:, None], object_ids] = object_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - if self._data._object_state_w.data is not None: - self._data.object_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - if self._data._object_com_state_w.data is not None: - # get CoM pose in link frame - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] - com_pos, com_quat = math_utils.combine_frame_transforms( - object_pose[..., :3], - object_pose[..., 3:7], - com_pos_b, - com_quat_b, - ) - self._data.object_com_state_w[env_ids[:, None], object_ids, :3] = com_pos - self._data.object_com_state_w[env_ids[:, None], object_ids, 3:7] = com_quat - - # convert the quaternion from wxyz to xyzw - poses_xyzw = self._data.object_link_pose_w.clone() - poses_xyzw[..., 3:] = math_utils.convert_quat(poses_xyzw[..., 3:], to="xyzw") - - # set into simulation - view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) - self.root_physx_view.set_transforms(self.reshape_data_to_view(poses_xyzw), indices=view_ids) - - def write_object_com_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass pose over selected environment indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - The orientation is the orientation of the principle axes of inertia. - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # set into internal buffers - self._data.object_com_pose_w[env_ids[:, None], object_ids] = object_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. 
- if self._data._object_com_state_w.data is not None: - self._data.object_com_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - - # get CoM pose in link frame - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] - # transform input CoM pose to link frame - object_link_pos, object_link_quat = math_utils.combine_frame_transforms( - object_pose[..., :3], - object_pose[..., 3:7], - math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), - math_utils.quat_inv(com_quat_b), - ) - - # write transformed pose in link frame to sim - object_link_pose = torch.cat((object_link_pos, object_link_quat), dim=-1) - self.write_object_link_pose_to_sim(object_link_pose, env_ids=env_ids, object_ids=object_ids) - - def write_object_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object velocity over selected environment and object indices into the simulation. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_com_velocity_to_sim(object_velocity, env_ids=env_ids, object_ids=object_ids) - - def write_object_com_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass velocity over selected environment and object indices into the simulation. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.object_com_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_com_state_w.data is not None: - self._data.object_com_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - if self._data._object_state_w.data is not None: - self._data.object_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - # make the acceleration zero to prevent reporting old values - self._data.object_com_acc_w[env_ids[:, None], object_ids] = 0.0 - - # set into simulation - view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) - self.root_physx_view.set_velocities(self.reshape_data_to_view(self._data.object_com_vel_w), indices=view_ids) - - def write_object_link_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object link velocity over selected environment indices into the simulation. 
- - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the object's frame rather than the objects center of mass. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # set into internal buffers - self._data.object_link_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - - # get CoM pose in link frame - quat = self.data.object_link_quat_w[env_ids[:, None], object_ids] - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - # transform input velocity to center of mass frame - object_com_velocity = object_velocity.clone() - object_com_velocity[..., :3] += torch.linalg.cross( - object_com_velocity[..., 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write center of mass velocity to sim - self.write_object_com_velocity_to_sim(object_com_velocity, env_ids=env_ids, object_ids=object_ids) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - env_ids: torch.Tensor | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the objects' bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 0, 3), torques=torch.zeros(0, 0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - object_ids: Object indices to apply external wrench to. Defaults to None (all objects). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the bodies. 
- """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - - if forces is None and torques is None: - logger.warning("No forces or torques provided. No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES_WP - elif isinstance(object_ids, slice): - object_ids = wp.from_torch( - torch.arange(self.num_objects, dtype=torch.int32, device=self.device)[object_ids], dtype=wp.int32 - ) - elif not isinstance(object_ids, torch.Tensor): - object_ids = wp.array(object_ids, dtype=wp.int32, device=self.device) - else: - object_ids = wp.from_torch(object_ids.to(torch.int32), dtype=wp.int32) - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=object_ids, - env_ids=env_ids, - is_global=is_global, - ) - - """ - Helper functions. - """ - - def reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data coming from the :attr:`root_physx_view` to - (num_instances, num_objects, data_dim). - - Args: - data: The data coming from the :attr:`root_physx_view`. Shape is (num_instances * num_objects, data_dim). - - Returns: - The reshaped data. Shape is (num_instances, num_objects, data_dim). - """ - return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) - - def reshape_data_to_view(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data to the be consistent with data from the :attr:`root_physx_view`. - - Args: - data: The data to be reshaped. Shape is (num_instances, num_objects, data_dim). - - Returns: - The reshaped data. Shape is (num_instances * num_objects, data_dim). - """ - return torch.einsum("ijk -> jik", data).reshape(self.num_objects * self.num_instances, *data.shape[2:]) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # clear object names list to prevent double counting on re-initialization - self._object_names_list.clear() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - root_prim_path_exprs = [] - for name, rigid_object_cfg in self.cfg.rigid_objects.items(): - # obtain the first prim in the regex expression (all others are assumed to be a copy of this) - template_prim = sim_utils.find_first_matching_prim(rigid_object_cfg.prim_path) - if template_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{rigid_object_cfg.prim_path}'.") - template_prim_path = template_prim.GetPath().pathString - - # find rigid root prims - root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), - traverse_instance_prims=False, - ) - if len(root_prims) == 0: - raise RuntimeError( - f"Failed to find a rigid body when resolving '{rigid_object_cfg.prim_path}'." 
- " Please ensure that the prim has 'USD RigidBodyAPI' applied." - ) - if len(root_prims) > 1: - raise RuntimeError( - f"Failed to find a single rigid body when resolving '{rigid_object_cfg.prim_path}'." - f" Found multiple '{root_prims}' under '{template_prim_path}'." - " Please ensure that there is only one rigid body in the prim path tree." - ) - - # check that no rigid object has an articulation root API, which decreases simulation performance - articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(articulation_prims) != 0: - if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): - raise RuntimeError( - f"Found an articulation root when resolving '{rigid_object_cfg.prim_path}' in the rigid object" - f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." - " Please disable the articulation root in the USD or from code by setting the parameter" - " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." - ) - - # resolve root prim back into regex expression - root_prim_path = root_prims[0].GetPath().pathString - root_prim_path_expr = rigid_object_cfg.prim_path + root_prim_path[len(template_prim_path) :] - root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) - - self._object_names_list.append(name) - - # -- object view - self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) - - # check if the rigid body was created - if self._root_physx_view._backend is None: - raise RuntimeError("Failed to create rigid body collection. Please check PhysX logs.") - - # log information about the rigid body - logger.info(f"Number of instances: {self.num_instances}") - logger.info(f"Number of distinct objects: {self.num_objects}") - logger.info(f"Object names: {self.object_names}") - - # container for data access - self._data = RigidObjectCollectionData(self.root_physx_view, self.num_objects, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - # update the rigid body data - self.update(0.0) - - def _create_buffers(self): - """Create buffers for storing data.""" - # constants - self._ALL_ENV_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_OBJ_INDICES = torch.arange(self.num_objects, dtype=torch.long, device=self.device) - self._ALL_ENV_INDICES_WP = wp.from_torch(self._ALL_ENV_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_OBJ_INDICES_WP = wp.from_torch(self._ALL_OBJ_INDICES.to(torch.int32), dtype=wp.int32) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) - - # set information about rigid body into data - self._data.object_names = self.object_names - self._data.default_mass = self.reshape_view_to_data(self.root_physx_view.get_masses().clone()) - self._data.default_inertia = self.reshape_view_to_data(self.root_physx_view.get_inertias().clone()) - - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- object state - default_object_states = [] - for rigid_object_cfg in self.cfg.rigid_objects.values(): - default_object_state = ( - tuple(rigid_object_cfg.init_state.pos) - + tuple(rigid_object_cfg.init_state.rot) - + tuple(rigid_object_cfg.init_state.lin_vel) - + 
tuple(rigid_object_cfg.init_state.ang_vel) - ) - default_object_state = ( - torch.tensor(default_object_state, dtype=torch.float, device=self.device) - .repeat(self.num_instances, 1) - .unsqueeze(1) - ) - default_object_states.append(default_object_state) - # concatenate the default state for each object - default_object_states = torch.cat(default_object_states, dim=1) - self._data.default_object_state = default_object_states - - def _env_obj_ids_to_view_ids( - self, env_ids: torch.Tensor, object_ids: Sequence[int] | slice | torch.Tensor - ) -> torch.Tensor: - """Converts environment and object indices to indices consistent with data from :attr:`root_physx_view`. - - Args: - env_ids: Environment indices. - object_ids: Object indices. - - Returns: - The view indices. - """ - # the order is env_0/object_0, env_0/object_1, env_0/object_..., env_1/object_0, env_1/object_1, ... - # return a flat tensor of indices - if isinstance(object_ids, slice): - object_ids = self._ALL_OBJ_INDICES - elif isinstance(object_ids, Sequence): - object_ids = torch.tensor(object_ids, device=self.device) - return (object_ids.unsqueeze(1) * self.num_instances + env_ids).flatten() - - """ - Internal simulation callbacks. - """ + from isaaclab_physx.assets.rigid_object_collection import RigidObjectCollection as PhysXRigidObjectCollection + from isaaclab_physx.assets.rigid_object_collection import RigidObjectCollectionData as PhysXRigidObjectCollectionData - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._root_physx_view = None - def _on_prim_deletion(self, prim_path: str) -> None: - """Invalidates and deletes the callbacks when the prim is deleted. +class RigidObjectCollection(FactoryBase, BaseRigidObjectCollection): + """Factory for creating rigid object collection instances.""" - Args: - prim_path: The path to the prim that is being deleted. + data: BaseRigidObjectCollectionData | PhysXRigidObjectCollectionData - Note: - This function is called when the prim is deleted. - """ - if prim_path == "/": - self._clear_callbacks() - return - for prim_path_expr in self._prim_paths: - result = re.match( - pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path - ) - if result: - self._clear_callbacks() - return + def __new__(cls, *args, **kwargs) -> BaseRigidObjectCollection | PhysXRigidObjectCollection: + """Create a new instance of a rigid object collection based on the backend.""" + # The `FactoryBase` __new__ method will handle the logic and return + # an instance of the correct backend-specific rigid object collection class, + # which is guaranteed to be a subclass of `BaseRigidObjectCollection` by convention. 
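+        #
+        # A hypothetical usage sketch (the ``cfg`` name below is illustrative only):
+        #
+        #   collection = RigidObjectCollection(cfg)
+        #   # With the PhysX backend active, this returns a PhysXRigidObjectCollection,
+        #   # which remains usable through the BaseRigidObjectCollection interface.
+        #   assert isinstance(collection, BaseRigidObjectCollection)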
+ return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py index 5156ef729e4..0e15a3eebfa 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py @@ -3,513 +3,21 @@ # # SPDX-License-Identifier: BSD-3-Clause -import weakref +from __future__ import annotations -import torch +from typing import TYPE_CHECKING -import omni.physics.tensors.impl.api as physx +from isaaclab.utils.backend_utils import FactoryBase -import isaaclab.utils.math as math_utils -from isaaclab.sim.utils.stage import get_current_stage_id -from isaaclab.utils.buffers import TimestampedBuffer +from .base_rigid_object_collection_data import BaseRigidObjectCollectionData +if TYPE_CHECKING: + from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import RigidObjectCollectionData as PhysXRigidObjectCollectionData -class RigidObjectCollectionData: - """Data container for a rigid object collection. - This class contains the data for a rigid object collection in the simulation. The data includes the state of - all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. - The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. +class RigidObjectCollectionData(FactoryBase): + """Factory for creating rigid object collection data instances.""" - For a rigid body, there are two frames of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. - This needs to be taken into account when interpreting the data. - - The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful - when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer - is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. - """ - - def __init__(self, root_physx_view: physx.RigidBodyView, num_objects: int, device: str): - """Initializes the data. - - Args: - root_physx_view: The root rigid body view. - num_objects: The number of objects in the collection. - device: The device used for processing. - """ - # Set the parameters - self.device = device - self.num_objects = num_objects - # Set the root rigid body view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. 
- self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) - self.num_instances = self._root_physx_view.count // self.num_objects - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, self.num_objects, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat( - self.num_instances, self.num_objects, 1 - ) - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._object_link_pose_w = TimestampedBuffer() - self._object_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._object_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._object_com_pose_w = TimestampedBuffer() - self._object_com_vel_w = TimestampedBuffer() - self._object_com_acc_w = TimestampedBuffer() - # -- combined state(these are cached as they concatenate) - self._object_state_w = TimestampedBuffer() - self._object_link_state_w = TimestampedBuffer() - self._object_com_state_w = TimestampedBuffer() - - def update(self, dt: float): - """Updates the data for the rigid object collection. - - Args: - dt: The time step for the update. This must be a positive value. - """ - # update the simulation timestamp - self._sim_timestamp += dt - - ## - # Names. - ## - - object_names: list[str] = None - """Object names in the order parsed by the simulation view.""" - - ## - # Defaults. - ## - - default_object_state: torch.Tensor = None - """Default object state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - Shape is (num_instances, num_objects, 13). - - The position and quaternion are of each object's rigid body's actor frame. Meanwhile, the linear and - angular velocities are of the center of mass frame. - """ - - default_mass: torch.Tensor = None - """Default object mass read from the simulation. Shape is (num_instances, num_objects, 1).""" - - default_inertia: torch.Tensor = None - """Default object inertia tensor read from the simulation. Shape is (num_instances, num_objects, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. - The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. - However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Root state properties. - ## - - @property - def object_link_pose_w(self): - """Object link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's actor frame. 
- """ - if self._object_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._reshape_view_to_data(self._root_physx_view.get_transforms().clone()) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._object_link_pose_w.data = pose - self._object_link_pose_w.timestamp = self._sim_timestamp - - return self._object_link_pose_w.data - - @property - def object_link_vel_w(self): - """Object link velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 6). - - The linear and angular velocities are of the rigid body's actor frame. - """ - if self._object_link_vel_w.timestamp < self._sim_timestamp: - # read data from simulation - velocity = self.object_com_vel_w.clone() - # adjust linear velocity to link from center of mass - velocity[..., :3] += torch.linalg.cross( - velocity[..., 3:], math_utils.quat_apply(self.object_link_quat_w, -self.object_com_pos_b), dim=-1 - ) - # set the buffer data and timestamp - self._object_link_vel_w.data = velocity - self._object_link_vel_w.timestamp = self._sim_timestamp - - return self._object_link_vel_w.data - - @property - def object_com_pose_w(self): - """Object center of mass pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's center of mass frame. - """ - if self._object_com_pose_w.timestamp < self._sim_timestamp: - # adjust pose to center of mass - pos, quat = math_utils.combine_frame_transforms( - self.object_link_pos_w, self.object_link_quat_w, self.object_com_pos_b, self.object_com_quat_b - ) - # set the buffer data and timestamp - self._object_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._object_com_pose_w.timestamp = self._sim_timestamp - - return self._object_com_pose_w.data - - @property - def object_com_vel_w(self): - """Object center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 6). - - The linear and angular velocities are of the rigid body's center of mass frame. - """ - if self._object_com_vel_w.timestamp < self._sim_timestamp: - self._object_com_vel_w.data = self._reshape_view_to_data(self._root_physx_view.get_velocities()) - self._object_com_vel_w.timestamp = self._sim_timestamp - - return self._object_com_vel_w.data - - @property - def object_state_w(self): - """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular - velocities are of the rigid body's center of mass frame. - """ - if self._object_state_w.timestamp < self._sim_timestamp: - self._object_state_w.data = torch.cat((self.object_link_pose_w, self.object_com_vel_w), dim=-1) - self._object_state_w.timestamp = self._sim_timestamp - - return self._object_state_w.data - - @property - def object_link_state_w(self): - """Object center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the - world. 
- """ - if self._object_link_state_w.timestamp < self._sim_timestamp: - self._object_link_state_w.data = torch.cat((self.object_link_pose_w, self.object_link_vel_w), dim=-1) - self._object_link_state_w.timestamp = self._sim_timestamp - - return self._object_link_state_w.data - - @property - def object_com_state_w(self): - """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame - relative to the world. Center of mass frame has the orientation along the principle axes of inertia. - """ - if self._object_com_state_w.timestamp < self._sim_timestamp: - self._object_com_state_w.data = torch.cat((self.object_com_pose_w, self.object_com_vel_w), dim=-1) - self._object_com_state_w.timestamp = self._sim_timestamp - - return self._object_com_state_w.data - - @property - def object_com_acc_w(self): - """Acceleration of all objects. Shape is (num_instances, num_objects, 6). - - This quantity is the acceleration of the rigid bodies' center of mass frame. - """ - if self._object_com_acc_w.timestamp < self._sim_timestamp: - self._object_com_acc_w.data = self._reshape_view_to_data(self._root_physx_view.get_accelerations()) - self._object_com_acc_w.timestamp = self._sim_timestamp - return self._object_com_acc_w.data - - @property - def object_com_pose_b(self): - """Object center of mass pose ``[pos, quat]`` in their respective body's link frame. - Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's center of mass frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._object_com_pose_b.timestamp < self._sim_timestamp: - # obtain the coms - poses = self._root_physx_view.get_coms().to(self.device) - poses[:, 3:7] = math_utils.convert_quat(poses[:, 3:7], to="wxyz") - # read data from simulation - self._object_com_pose_b.data = self._reshape_view_to_data(poses) - self._object_com_pose_b.timestamp = self._sim_timestamp - - return self._object_com_pose_b.data - - ## - # Derived properties. - ## - - @property - def projected_gravity_b(self): - """Projection of the gravity direction on base frame. Shape is (num_instances, num_objects, 3).""" - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self): - """Yaw heading of the base frame (in radians). Shape is (num_instances, num_objects,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. - """ - forward_w = math_utils.quat_apply(self.object_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[..., 1], forward_w[..., 0]) - - @property - def object_link_lin_vel_b(self) -> torch.Tensor: - """Object link linear velocity in base frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the actor frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_lin_vel_w) - - @property - def object_link_ang_vel_b(self) -> torch.Tensor: - """Object link angular velocity in base world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body frame with - respect to the rigid body's actor frame. 
- """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_ang_vel_w) - - @property - def object_com_lin_vel_b(self) -> torch.Tensor: - """Object center of mass linear velocity in base frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the center of mass frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_lin_vel_w) - - @property - def object_com_ang_vel_b(self) -> torch.Tensor: - """Object center of mass angular velocity in base world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the center of mass frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def object_link_pos_w(self) -> torch.Tensor: - """Object link position in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the position of the actor frame of the rigid bodies. - """ - return self.object_link_pose_w[..., :3] - - @property - def object_link_quat_w(self) -> torch.Tensor: - """Object link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the actor frame of the rigid bodies. - """ - return self.object_link_pose_w[..., 3:7] - - @property - def object_link_lin_vel_w(self) -> torch.Tensor: - """Object link linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the rigid bodies' actor frame. - """ - return self.object_link_vel_w[..., :3] - - @property - def object_link_ang_vel_w(self) -> torch.Tensor: - """Object link angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the rigid bodies' actor frame. - """ - return self.object_link_vel_w[..., 3:6] - - @property - def object_com_pos_w(self) -> torch.Tensor: - """Object center of mass position in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the position of the center of mass frame of the rigid bodies. - """ - return self.object_com_pose_w[..., :3] - - @property - def object_com_quat_w(self) -> torch.Tensor: - """Object center of mass orientation (w, x, y, z) in simulation world frame. - Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the center of mass frame of the rigid bodies. - """ - return self.object_com_pose_w[..., 3:7] - - @property - def object_com_lin_vel_w(self) -> torch.Tensor: - """Object center of mass linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame. - """ - return self.object_com_vel_w[..., :3] - - @property - def object_com_ang_vel_w(self) -> torch.Tensor: - """Object center of mass angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame. - """ - return self.object_com_vel_w[..., 3:6] - - @property - def object_com_lin_acc_w(self) -> torch.Tensor: - """Object center of mass linear acceleration in simulation world frame. - Shape is (num_instances, num_objects, 3). 
- - This quantity is the linear acceleration of the rigid bodies' center of mass frame. - """ - return self.object_com_acc_w[..., :3] - - @property - def object_com_ang_acc_w(self) -> torch.Tensor: - """Object center of mass angular acceleration in simulation world frame. - Shape is (num_instances, num_objects, 3). - - This quantity is the angular acceleration of the rigid bodies' center of mass frame. - """ - return self.object_com_acc_w[..., 3:6] - - @property - def object_com_pos_b(self) -> torch.Tensor: - """Center of mass of all of the bodies in their respective body's link frame. - Shape is (num_instances, num_objects, 3). - - This quantity is the center of mass location relative to its body link frame. - """ - return self.object_com_pose_b[..., :3] - - @property - def object_com_quat_b(self) -> torch.Tensor: - """Orientation (w,x,y,z) of the principle axis of inertia of all of the bodies in simulation world frame. - Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the principles axes of inertia relative to its body link frame. - The orientation is provided in (w, x, y, z) format. - """ - return self.object_com_pose_b[..., 3:7] - - ## - # Properties for backwards compatibility. - ## - - @property - def object_pose_w(self) -> torch.Tensor: - """Same as :attr:`object_link_pose_w`.""" - return self.object_link_pose_w - - @property - def object_pos_w(self) -> torch.Tensor: - """Same as :attr:`object_link_pos_w`.""" - return self.object_link_pos_w - - @property - def object_quat_w(self) -> torch.Tensor: - """Same as :attr:`object_link_quat_w`.""" - return self.object_link_quat_w - - @property - def object_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_vel_w`.""" - return self.object_com_vel_w - - @property - def object_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_vel_w`.""" - return self.object_com_lin_vel_w - - @property - def object_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_vel_w`.""" - return self.object_com_ang_vel_w - - @property - def object_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_vel_b`.""" - return self.object_com_lin_vel_b - - @property - def object_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_vel_b`.""" - return self.object_com_ang_vel_b - - @property - def object_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_acc_w`.""" - return self.object_com_acc_w - - @property - def object_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_acc_w`.""" - return self.object_com_lin_acc_w - - @property - def object_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_acc_w`.""" - return self.object_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`object_com_pos_b`.""" - return self.object_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`object_com_quat_b`.""" - return self.object_com_quat_b - - ## - # Helpers. - ## - - def _reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data from the physics view to (num_instances, num_objects, data_size). - - Args: - data: The data from the physics view. Shape is (num_instances * num_objects, data_size). - - Returns: - The reshaped data. Shape is (num_objects, num_instances, data_size). 
- """ - return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) + def __new__(cls, *args, **kwargs) -> BaseRigidObjectCollectionData | PhysXRigidObjectCollectionData: + """Create a new instance of a rigid object collection data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py index f16d1403853..7de29172a33 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py @@ -11,7 +11,7 @@ import torch -from isaaclab.assets.surface_gripper import SurfaceGripper +from isaaclab_physx.assets import SurfaceGripper from isaaclab.managers.action_manager import ActionTerm if TYPE_CHECKING: diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 484121f4e49..fe76c887115 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -29,7 +29,8 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.actuators import ImplicitActuator -from isaaclab.assets import Articulation, DeformableObject, RigidObject +from isaaclab.assets import Articulation, RigidObject +from isaaclab_physx.assets import DeformableObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.sim.utils.stage import get_current_stage from isaaclab.terrains import TerrainImporter diff --git a/source/isaaclab/isaaclab/scene/interactive_scene.py b/source/isaaclab/isaaclab/scene/interactive_scene.py index 291e371ca39..3d615bdfef9 100644 --- a/source/isaaclab/isaaclab/scene/interactive_scene.py +++ b/source/isaaclab/isaaclab/scene/interactive_scene.py @@ -18,15 +18,14 @@ Articulation, ArticulationCfg, AssetBaseCfg, - DeformableObject, - DeformableObjectCfg, RigidObject, RigidObjectCfg, RigidObjectCollection, RigidObjectCollectionCfg, - SurfaceGripper, - SurfaceGripperCfg, ) +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg +from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg + from isaaclab.sensors import ContactSensorCfg, FrameTransformerCfg, SensorBase, SensorBaseCfg, VisuoTactileSensorCfg from isaaclab.sim import SimulationContext from isaaclab.sim.utils.stage import get_current_stage, get_current_stage_id diff --git a/source/isaaclab/isaaclab/sim/simulation_manager.py b/source/isaaclab/isaaclab/sim/simulation_manager.py new file mode 100644 index 00000000000..d1687db4fb8 --- /dev/null +++ b/source/isaaclab/isaaclab/sim/simulation_manager.py @@ -0,0 +1,1004 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import weakref
+from collections import OrderedDict
+
+import carb
+import isaacsim.core.utils.numpy as np_utils
+import isaacsim.core.utils.torch as torch_utils
+import isaacsim.core.utils.warp as warp_utils
+import omni.kit
+import omni.physx
+import omni.timeline
+import omni.usd
+from isaacsim.core.utils.prims import get_prim_at_path, is_prim_path_valid
+from isaacsim.core.utils.stage import get_current_stage, get_current_stage_id
+from pxr import PhysxSchema
+
+from .isaac_events import IsaacEvents
+
+
+class SimulationManager:
+    """This class provides functions that take care of many time-related events, such as warm starting the
+    simulation so that the physics data is retrievable. It also supports adding and removing callback
+    functions that get triggered on certain events, such as a physics step, post reset, or physics ready."""
+
+    _warmup_needed = True
+    #_timeline = omni.timeline.get_timeline_interface()
+    _message_bus = carb.eventdispatcher.get_eventdispatcher()
+    _physx_sim_interface = omni.physx.get_physx_simulation_interface()
+    _physx_interface = omni.physx.get_physx_interface()
+    _physics_sim_view = None
+    _physics_sim_view__warp = None
+    _backend = "torch"
+    _carb_settings = carb.settings.get_settings()
+    _physics_scene_apis = OrderedDict()
+    #_callbacks = dict()
+    _simulation_manager_interface = None
+    _simulation_view_created = False
+    #_assets_loaded = True
+    #_assets_loading_callback = None
+    #_assets_loaded_callback = None
+    _default_physics_scene_idx = -1
+
+    # callback handles
+    #_warm_start_callback = None
+    #_on_stop_callback = None
+    #_post_warm_start_callback = None
+    #_stage_open_callback = None
+
+    # Add callback state tracking
+    #_callbacks_enabled = {
+    #    "warm_start": True,
+    #    "on_stop": True,
+    #    "post_warm_start": True,
+    #    "stage_open": True,
+    #}
+
+    @classmethod
+    def _initialize(cls) -> None:
+        # Initialize all callbacks as enabled by default
+        SimulationManager.enable_all_default_callbacks(True)
+        SimulationManager._track_physics_scenes()
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # TODO: Remove this, as the callback handling has moved to the SimulationContext class.
+ #@classmethod + #def _setup_warm_start_callback(cls) -> None: + # if cls._callbacks_enabled["warm_start"] and cls._warm_start_callback is None: + # cls._warm_start_callback = cls._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( + # int(omni.timeline.TimelineEventType.PLAY), cls._warm_start + # ) + + #@classmethod + #def _setup_on_stop_callback(cls) -> None: + # if cls._callbacks_enabled["on_stop"] and cls._on_stop_callback is None: + # cls._on_stop_callback = cls._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( + # int(omni.timeline.TimelineEventType.STOP), cls._on_stop + # ) + + #@classmethod + #def _setup_post_warm_start_callback(cls) -> None: + # if cls._callbacks_enabled["post_warm_start"] and cls._post_warm_start_callback is None: + # cls._post_warm_start_callback = cls._message_bus.observe_event( + # event_name=IsaacEvents.PHYSICS_WARMUP.value, + # on_event=cls._create_simulation_view, + # observer_name="SimulationManager._post_warm_start_callback", + # ) + + #@classmethod + #def _setup_stage_open_callback(cls) -> None: + # if cls._callbacks_enabled["stage_open"] and cls._stage_open_callback is None: + # cls._stage_open_callback = cls._message_bus.observe_event( + # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.OPENED), + # on_event=cls._post_stage_open, + # observer_name="SimulationManager._stage_open_callback", + # ) + + @classmethod + def _clear(cls) -> None: + # Use callback management system for main callbacks + #cls.enable_all_default_callbacks(False) + + # Handle additional cleanup not covered by enable_all_default_callbacks + cls._physics_sim_view = None + cls._physics_sim_view__warp = None + cls._assets_loading_callback = None + cls._assets_loaded_callback = None + cls._simulation_manager_interface.reset() + cls._physics_scene_apis.clear() + cls._callbacks.clear() + + def _post_stage_open(event) -> None: + SimulationManager._simulation_manager_interface.reset() + SimulationManager._physics_scene_apis.clear() + SimulationManager._callbacks.clear() + SimulationManager._track_physics_scenes() + SimulationManager._assets_loaded = True + SimulationManager._assets_loading_callback = None + SimulationManager._assets_loaded_callback = None + + # TODO: This should not be needed as we should be synchronous. + #def _assets_loading(event): + # SimulationManager._assets_loaded = False + + #def _assets_loaded(event): + # SimulationManager._assets_loaded = True + + #SimulationManager._assets_loading_callback = SimulationManager._message_bus.observe_event( + # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.ASSETS_LOADING), + # on_event=_assets_loading, + # observer_name="SimulationManager._assets_loading_callback", + #) + + #SimulationManager._assets_loaded_callback = SimulationManager._message_bus.observe_event( + # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.ASSETS_LOADED), + # on_event=_assets_loaded, + # observer_name="SimulationManager._assets_loaded_callback", + #) + + # ------------------------------------------------------------------------------------------------------------------ + # TODO: This is very USD centric, we should not need this. Also we should have only one physics scene. 
+    #def _track_physics_scenes() -> None:
+    #    def add_physics_scenes(physics_scene_prim_path):
+    #        prim = get_prim_at_path(physics_scene_prim_path)
+    #        if prim.GetTypeName() == "PhysicsScene":
+    #            SimulationManager._physics_scene_apis[physics_scene_prim_path] = PhysxSchema.PhysxSceneAPI.Apply(prim)
+
+    #    def remove_physics_scenes(physics_scene_prim_path):
+    #        # TODO: match physics scene prim path
+    #        if physics_scene_prim_path in SimulationManager._physics_scene_apis:
+    #            del SimulationManager._physics_scene_apis[physics_scene_prim_path]
+
+    #    SimulationManager._simulation_manager_interface.register_physics_scene_addition_callback(add_physics_scenes)
+    #    SimulationManager._simulation_manager_interface.register_deletion_callback(remove_physics_scenes)
+
+    def _warm_start(event) -> None:
+        if SimulationManager._carb_settings.get_as_bool("/app/player/playSimulations"):
+            SimulationManager.initialize_physics()
+
+    def _on_stop(event) -> None:
+        SimulationManager._warmup_needed = True
+        if SimulationManager._physics_sim_view:
+            SimulationManager._physics_sim_view.invalidate()
+            SimulationManager._physics_sim_view = None
+        SimulationManager._simulation_view_created = False
+        if SimulationManager._physics_sim_view__warp:
+            SimulationManager._physics_sim_view__warp.invalidate()
+            SimulationManager._physics_sim_view__warp = None
+
+    def _create_simulation_view(event) -> None:
+        if "cuda" in SimulationManager.get_physics_sim_device() and SimulationManager._backend == "numpy":
+            SimulationManager._backend = "torch"
+            carb.log_warn("Changing backend from numpy to torch since the numpy backend cannot be used with GPU pipelines")
+        SimulationManager._physics_sim_view = omni.physics.tensors.create_simulation_view(
+            SimulationManager.get_backend(), stage_id=get_current_stage_id()
+        )
+        SimulationManager._physics_sim_view.set_subspace_roots("/")
+        #SimulationManager._physics_sim_view__warp = omni.physics.tensors.create_simulation_view(
+        #    "warp", stage_id=get_current_stage_id()
+        #)
+        #SimulationManager._physics_sim_view__warp.set_subspace_roots("/")
+        SimulationManager._physx_interface.update_simulation(SimulationManager.get_physics_dt(), 0.0)
+        SimulationManager._message_bus.dispatch_event(IsaacEvents.SIMULATION_VIEW_CREATED.value, payload={})
+        SimulationManager._simulation_view_created = True
+        SimulationManager._message_bus.dispatch_event(IsaacEvents.PHYSICS_READY.value, payload={})
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # TODO: Removing this as we should use our internal tools.
+    #@classmethod
+    #def _get_backend_utils(cls) -> str:
+    #    if SimulationManager._backend == "numpy":
+    #        return np_utils
+    #    elif SimulationManager._backend == "torch":
+    #        return torch_utils
+    #    elif SimulationManager._backend == "warp":
+    #        return warp_utils
+    #    else:
+    #        raise Exception(
+    #            f"Provided backend is not supported: {SimulationManager.get_backend()}. Supported: torch, numpy, warp."
+    #        )
+
+    # ------------------------------------------------------------------------------------------------------------------
+    # TODO: This is very USD centric, we should not need this. Also we should have only one physics scene. 
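A minimal, hypothetical sketch (not part of this patch) of how a consumer could react to the events dispatched by `_create_simulation_view` above. It only uses the `observe_event`/`dispatch_event` API that this file already relies on; the import path `isaaclab.sim.simulation_manager` is assumed from the file location in this diff.

    import carb

    from isaaclab.sim.simulation_manager import IsaacEvents, SimulationManager  # assumed import path


    def on_physics_ready(event):
        # PHYSICS_READY is dispatched after the tensor simulation view has been created,
        # so it is safe to query the view here.
        print("simulation view:", SimulationManager.get_physics_sim_view())


    # Keep a reference to the returned subscription object for as long as the observer should stay active.
    subscription = carb.eventdispatcher.get_eventdispatcher().observe_event(
        event_name=IsaacEvents.PHYSICS_READY.value,
        on_event=on_physics_ready,
        observer_name="example.on_physics_ready",
    )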
+    #@classmethod
+    #def _get_physics_scene_api(cls, physics_scene: str = None):
+    #    if physics_scene is None:
+    #        if len(SimulationManager._physics_scene_apis) > 0:
+    #            physics_scene_api = list(SimulationManager._physics_scene_apis.values())[
+    #                SimulationManager._default_physics_scene_idx
+    #            ]
+    #        else:
+    #            # carb.log_warn("Physics scene is not found in stage")
+    #            return None
+    #    else:
+    #        if physics_scene in SimulationManager._physics_scene_apis:
+    #            physics_scene_api = SimulationManager._physics_scene_apis[physics_scene]
+    #        else:
+    #            carb.log_warn("physics scene specified {} doesn't exist".format(physics_scene))
+    #            return None
+    #    return physics_scene_api
+
+    @classmethod
+    def set_backend(cls, val: str) -> None:
+        SimulationManager._backend = val
+
+    @classmethod
+    def get_backend(cls) -> str:
+        return SimulationManager._backend
+
+    @classmethod
+    def initialize_physics(cls) -> None:
+        if SimulationManager._warmup_needed:
+            SimulationManager._physx_interface.force_load_physics_from_usd()
+            SimulationManager._physx_interface.start_simulation()
+            SimulationManager._physx_interface.update_simulation(SimulationManager.get_physics_dt(), 0.0)
+            SimulationManager._physx_sim_interface.fetch_results()
+            SimulationManager._message_bus.dispatch_event(IsaacEvents.PHYSICS_WARMUP.value, payload={})
+            SimulationManager._warmup_needed = False
+
+    # TODO: Should handle simulation time itself.
+    @classmethod
+    def get_simulation_time(cls):
+        #return SimulationManager._simulation_manager_interface.get_simulation_time()
+        return cls._simulation_time
+
+    # TODO: Should handle simulation time itself.
+    @classmethod
+    def get_num_physics_steps(cls):
+        return SimulationManager._simulation_manager_interface.get_num_physics_steps()
+
+    # TODO: Doesn't need to know if the simulation is simulating or not.
+    #@classmethod
+    #def is_simulating(cls):
+    #    return SimulationManager._simulation_manager_interface.is_simulating()
+
+    # TODO: Doesn't need to know if the simulation is paused or not. 
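As a usage illustration of the warm-start path above, a short sketch (not part of this patch; the import path is again assumed from the file location in this diff):

    from isaaclab.sim.simulation_manager import SimulationManager  # assumed import path

    SimulationManager.set_backend("torch")     # tensor backend used when the simulation view is created
    SimulationManager.initialize_physics()     # force-loads physics, steps once, and dispatches PHYSICS_WARMUP
    print(SimulationManager.get_backend())     # -> "torch"
    print(SimulationManager.get_physics_dt())  # -> 1/60 s unless a tracked physics scene overrides it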
+    #@classmethod
+    #def is_paused(cls):
+    #    return SimulationManager._simulation_manager_interface.is_paused()
+
+    @classmethod
+    def get_physics_sim_view(cls):
+        return SimulationManager._physics_sim_view
+
+    @classmethod
+    def set_default_physics_scene(cls, physics_scene_prim_path: str):
+        if SimulationManager._warm_start_callback is None:
+            carb.log_warn("Calling set_default_physics_scene while SimulationManager is not tracking physics scenes")
+            return
+        if physics_scene_prim_path in SimulationManager._physics_scene_apis:
+            SimulationManager._default_physics_scene_idx = list(SimulationManager._physics_scene_apis.keys()).index(
+                physics_scene_prim_path
+            )
+        elif is_prim_path_valid(physics_scene_prim_path):
+            prim = get_prim_at_path(physics_scene_prim_path)
+            if prim.GetTypeName() == "PhysicsScene":
+                SimulationManager._physics_scene_apis[physics_scene_prim_path] = PhysxSchema.PhysxSceneAPI.Apply(prim)
+                SimulationManager._default_physics_scene_idx = list(SimulationManager._physics_scene_apis.keys()).index(
+                    physics_scene_prim_path
+                )
+        else:
+            raise Exception("physics scene specified {} doesn't exist".format(physics_scene_prim_path))
+
+    @classmethod
+    def get_default_physics_scene(cls) -> str:
+        if len(SimulationManager._physics_scene_apis) > 0:
+            return list(SimulationManager._physics_scene_apis.keys())[SimulationManager._default_physics_scene_idx]
+        else:
+            carb.log_warn("No physics scene is found in stage")
+            return None
+
+    @classmethod
+    def step(cls, render: bool = False):
+        if render:
+            raise Exception(
+                "Stepping the renderer is not supported at the moment through SimulationManager, use SimulationContext instead."
+            )
+        SimulationManager._physx_sim_interface.simulate(
+            SimulationManager.get_physics_dt(physics_scene=None), SimulationManager.get_simulation_time()
+        )
+        SimulationManager._physx_sim_interface.fetch_results()
+
+    @classmethod
+    def set_physics_sim_device(cls, val) -> None:
+        if "cuda" in val:
+            parsed_device = val.split(":")
+            if len(parsed_device) == 1:
+                device_id = SimulationManager._carb_settings.get_as_int("/physics/cudaDevice")
+                if device_id < 0:
+                    SimulationManager._carb_settings.set_int("/physics/cudaDevice", 0)
+                    device_id = 0
+            else:
+                SimulationManager._carb_settings.set_int("/physics/cudaDevice", int(parsed_device[1]))
+            SimulationManager._carb_settings.set_bool("/physics/suppressReadback", True)
+            SimulationManager.set_broadphase_type("GPU")
+            SimulationManager.enable_gpu_dynamics(flag=True)
+            SimulationManager.enable_fabric(enable=True)
+        elif "cpu" == val.lower():
+            SimulationManager._carb_settings.set_bool("/physics/suppressReadback", False)
+            # SimulationManager._carb_settings.set_int("/physics/cudaDevice", -1)
+            SimulationManager.set_broadphase_type("MBP")
+            SimulationManager.enable_gpu_dynamics(flag=False)
+        else:
+            raise Exception("Device {} is not supported.".format(val))
+
+    @classmethod
+    def get_physics_sim_device(cls) -> str:
+        suppress_readback = SimulationManager._carb_settings.get_as_bool("/physics/suppressReadback")
+        if (not SimulationManager._physics_scene_apis and suppress_readback) or (
+            suppress_readback
+            and SimulationManager.get_broadphase_type() == "GPU"
+            and SimulationManager.is_gpu_dynamics_enabled()
+        ):
+            device_id = SimulationManager._carb_settings.get_as_int("/physics/cudaDevice")
+            if device_id < 0:
+                SimulationManager._carb_settings.set_int("/physics/cudaDevice", 0)
+                device_id = 0
+            return f"cuda:{device_id}"
+        else:
+            return "cpu"
+
+    @classmethod
+    def set_physics_dt(cls, dt: float = 1.0 / 60.0, physics_scene: str = None) -> 
None: + """Sets the physics dt on the physics scene provided. + + Args: + dt (float, optional): physics dt. Defaults to 1.0/60.0. + physics_scene (str, optional): physics scene prim path. Defaults to first physics scene found in the stage. + + Raises: + Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + ValueError: Physics dt must be a >= 0. + ValueError: Physics dt must be a <= 1.0. + """ + if physics_scene is None: + physics_scene_apis = SimulationManager._physics_scene_apis.values() + else: + physics_scene_apis = [SimulationManager._get_physics_scene_api(physics_scene=physics_scene)] + + for physics_scene_api in physics_scene_apis: + if dt < 0: + raise ValueError("physics dt cannot be <0") + # if no stage or no change in physics timestep, exit. + if get_current_stage() is None: + return + if dt == 0: + physics_scene_api.GetTimeStepsPerSecondAttr().Set(0) + elif dt > 1.0: + raise ValueError("physics dt must be <= 1.0") + else: + steps_per_second = int(1.0 / dt) + physics_scene_api.GetTimeStepsPerSecondAttr().Set(steps_per_second) + return + + @classmethod + def get_physics_dt(cls, physics_scene: str = None) -> str: + """ + Returns the current physics dt. + + Args: + physics_scene (str, optional): physics scene prim path. + + Raises: + Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + Returns: + float: physics dt. + """ + physics_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + if physics_scene_api is None: + return 1.0 / 60.0 + physics_hz = physics_scene_api.GetTimeStepsPerSecondAttr().Get() + if physics_hz == 0: + return 0.0 + else: + return 1.0 / physics_hz + + # ------------------------------------------------------------------------------------------------------------------ + # TODO: Removing this as we will only set the settings on initialization. + #@classmethod + #def get_broadphase_type(cls, physics_scene: str = None) -> str: + # """Gets current broadcast phase algorithm type. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # str: Broadcast phase algorithm used. + # """ + # physics_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physics_scene_api.GetBroadphaseTypeAttr().Get() + + #@classmethod + #def set_broadphase_type(cls, val: str, physics_scene: str = None) -> None: + # """Broadcast phase algorithm used in simulation. + + # Args: + # val (str): type of broadcasting to be used, can be "MBP". + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
+ # """ + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetBroadphaseTypeAttr().Get() is None: + # physx_scene_api.CreateBroadphaseTypeAttr(val) + # else: + # physx_scene_api.GetBroadphaseTypeAttr().Set(val) + # else: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # if physx_scene_api.GetBroadphaseTypeAttr().Get() is None: + # physx_scene_api.CreateBroadphaseTypeAttr(val) + # else: + # physx_scene_api.GetBroadphaseTypeAttr().Set(val) + + #@classmethod + #def enable_ccd(cls, flag: bool, physics_scene: str = None) -> None: + # """Enables a second broad phase after integration that makes it possible to prevent objects from tunneling + # through each other. + + # Args: + # flag (bool): enables or disables ccd on the PhysicsScene + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + # """ + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetEnableCCDAttr().Get() is None: + # physx_scene_api.CreateEnableCCDAttr(flag) + # else: + # physx_scene_api.GetEnableCCDAttr().Set(flag) + # else: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # if physx_scene_api.GetEnableCCDAttr().Get() is None: + # physx_scene_api.CreateEnableCCDAttr(flag) + # else: + # physx_scene_api.GetEnableCCDAttr().Set(flag) + + #@classmethod + #def is_ccd_enabled(cls, physics_scene: str = None) -> bool: + # """Checks if ccd is enabled. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # bool: True if ccd is enabled, otherwise False. + # """ + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physx_scene_api.GetEnableCCDAttr().Get() + + #@classmethod + #def enable_ccd(cls, flag: bool, physics_scene: str = None) -> None: + # """Enables Continuous Collision Detection (CCD). + + # Args: + # flag (bool): enables or disables CCD on the PhysicsScene. + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
+ # """ + # if flag and "cuda" in SimulationManager.get_physics_sim_device(): + # carb.log_warn("CCD is not supported on GPU, ignoring request to enable it") + # return + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetEnableCCDAttr().Get() is None: + # physx_scene_api.CreateEnableCCDAttr(flag) + # else: + # physx_scene_api.GetEnableCCDAttr().Set(flag) + # else: + # if physics_scene in SimulationManager._physics_scene_apis: + # physx_scene_api = SimulationManager._physics_scene_apis[physics_scene] + # if physx_scene_api.GetEnableCCDAttr().Get() is None: + # physx_scene_api.CreateEnableCCDAttr(flag) + # else: + # physx_scene_api.GetEnableCCDAttr().Set(flag) + # else: + # raise Exception("physics scene specified {} doesn't exist".format(physics_scene)) + + #@classmethod + #def is_ccd_enabled(cls, physics_scene: str = None) -> bool: + # """Checks if Continuous Collision Detection (CCD) is enabled. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # bool: True if CCD is enabled, otherwise False. + # """ + # if physics_scene is None: + # if len(SimulationManager._physics_scene_apis) > 0: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physx_scene_api.GetEnableCCDAttr().Get() + # else: + # return False + # else: + # if physics_scene in SimulationManager._physics_scene_apis: + # physx_scene_api = SimulationManager._physics_scene_apis[physics_scene] + # return physx_scene_api.GetEnableCCDAttr().Get() + # else: + # raise Exception("physics scene specified {} doesn't exist".format(physics_scene)) + + #@classmethod + #def enable_gpu_dynamics(cls, flag: bool, physics_scene: str = None) -> None: + # """Enables gpu dynamics pipeline, required for deformables for instance. + + # Args: + # flag (bool): enables or disables gpu dynamics on the PhysicsScene. If flag is true, CCD is disabled. + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
+ # """ + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetEnableGPUDynamicsAttr().Get() is None: + # physx_scene_api.CreateEnableGPUDynamicsAttr(flag) + # else: + # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) + # # Disable CCD for GPU dynamics as its not supported + # if flag: + # if SimulationManager.is_ccd_enabled(): + # carb.log_warn("Disabling CCD for GPU dynamics as its not supported") + # SimulationManager.enable_ccd(flag=False) + # else: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # if physx_scene_api.GetEnableGPUDynamicsAttr().Get() is None: + # physx_scene_api.CreateEnableGPUDynamicsAttr(flag) + # else: + # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) + # # Disable CCD for GPU dynamics as its not supported + # if flag: + # if SimulationManager.is_ccd_enabled(physics_scene=physics_scene): + # carb.log_warn("Disabling CCD for GPU dynamics as its not supported") + # SimulationManager.enable_ccd(flag=False, physics_scene=physics_scene) + # else: + # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) + + #@classmethod + #def is_gpu_dynamics_enabled(cls, physics_scene: str = None) -> bool: + # """Checks if Gpu Dynamics is enabled. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # bool: True if Gpu Dynamics is enabled, otherwise False. + # """ + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physx_scene_api.GetEnableGPUDynamicsAttr().Get() + + # ------------------------------------------------------------------------------------------------------------------ + + @classmethod + def enable_fabric(cls, enable): + """Enables or disables physics fabric integration and associated settings. + + Args: + enable: Whether to enable or disable fabric. + """ + manager = omni.kit.app.get_app().get_extension_manager() + fabric_was_enabled = manager.is_extension_enabled("omni.physx.fabric") + if not fabric_was_enabled and enable: + manager.set_extension_enabled_immediate("omni.physx.fabric", True) + elif fabric_was_enabled and not enable: + manager.set_extension_enabled_immediate("omni.physx.fabric", False) + SimulationManager._carb_settings.set_bool("/physics/updateToUsd", not enable) + SimulationManager._carb_settings.set_bool("/physics/updateParticlesToUsd", not enable) + SimulationManager._carb_settings.set_bool("/physics/updateVelocitiesToUsd", not enable) + SimulationManager._carb_settings.set_bool("/physics/updateForceSensorsToUsd", not enable) + + @classmethod + def is_fabric_enabled(cls, enable): + """Checks if fabric is enabled. + + Args: + enable: Whether to check if fabric is enabled. + + Returns: + bool: True if fabric is enabled, otherwise False. + """ + return omni.kit.app.get_app().get_extension_manager().is_extension_enabled("omni.physx.fabric") + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing this as we will only set the settings on initialization. + #@classmethod + #def set_solver_type(cls, solver_type: str, physics_scene: str = None) -> None: + # """solver used for simulation. + + # Args: + # solver_type (str): can be "TGS" or "PGS". 
+ # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + # """ + # if solver_type not in ["TGS", "PGS"]: + # raise Exception("Solver type {} is not supported".format(solver_type)) + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetSolverTypeAttr().Get() is None: + # physx_scene_api.CreateSolverTypeAttr(solver_type) + # else: + # physx_scene_api.GetSolverTypeAttr().Set(solver_type) + # else: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # if physx_scene_api.GetSolverTypeAttr().Get() is None: + # physx_scene_api.CreateSolverTypeAttr(solver_type) + # else: + # physx_scene_api.GetSolverTypeAttr().Set(solver_type) + + #@classmethod + #def get_solver_type(cls, physics_scene: str = None) -> str: + # """Gets current solver type. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # str: solver used for simulation. + # """ + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physx_scene_api.GetSolverTypeAttr().Get() + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing this, this is oddly specific, we will use string sets on initialization. + #@classmethod + #def enable_stablization(cls, flag: bool, physics_scene: str = None) -> None: + # """Enables additional stabilization pass in the solver. + + # Args: + # flag (bool): enables or disables stabilization on the PhysicsScene + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + # """ + # if physics_scene is None: + # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): + # if not physx_scene_api.GetPrim().IsValid(): + # continue + # if physx_scene_api.GetEnableStabilizationAttr().Get() is None: + # physx_scene_api.CreateEnableStabilizationAttr(flag) + # else: + # physx_scene_api.GetEnableStabilizationAttr().Set(flag) + # else: + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # if physx_scene_api.GetEnableStabilizationAttr().Get() is None: + # physx_scene_api.CreateEnableStabilizationAttr(flag) + # else: + # physx_scene_api.GetEnableStabilizationAttr().Set(flag) + + #@classmethod + #def is_stablization_enabled(cls, physics_scene: str = None) -> bool: + # """Checks if stabilization is enabled. + + # Args: + # physics_scene (str, optional): physics scene prim path. + + # Raises: + # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. + + # Returns: + # bool: True if stabilization is enabled, otherwise False. + # """ + # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) + # return physx_scene_api.GetEnableStabilizationAttr().Get() + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing this as the callbacks handling are moved to the SimulationContext class. 
+ #@classmethod + #def register_callback(cls, callback: callable, event, order: int = 0, name: str = None): + # """Registers a callback to be triggered when a specific event occurs. + + # Args: + # callback: The callback function to register. + # event: The event to trigger the callback. + # order: The order in which the callback should be triggered. + # name: The name of the callback. + + # Returns: + # int: The ID of the callback. + # """ + # proxy_needed = False + # if hasattr(callback, "__self__"): + # proxy_needed = True + # callback_name = callback.__name__ + # callback_id = SimulationManager._simulation_manager_interface.get_callback_iter() + # if event in [ + # IsaacEvents.PHYSICS_WARMUP, + # IsaacEvents.PHYSICS_READY, + # IsaacEvents.POST_RESET, + # IsaacEvents.SIMULATION_VIEW_CREATED, + # ]: + # if proxy_needed: + # SimulationManager._callbacks[callback_id] = SimulationManager._message_bus.observe_event( + # event_name=event.value, + # order=order, + # on_event=lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event), + # observer_name=f"SimulationManager._callbacks.{event.value}", + # ) + # else: + # SimulationManager._callbacks[callback_id] = SimulationManager._message_bus.observe_event( + # event_name=event.value, + # order=order, + # on_event=callback, + # observer_name=f"SimulationManager._callbacks.{event.value}", + # ) + # elif event == IsaacEvents.PRIM_DELETION: + # if proxy_needed: + # SimulationManager._simulation_manager_interface.register_deletion_callback( + # lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event) + # ) + # else: + # SimulationManager._simulation_manager_interface.register_deletion_callback(callback) + # elif event == IsaacEvents.POST_PHYSICS_STEP: + # if proxy_needed: + # SimulationManager._callbacks[callback_id] = ( + # SimulationManager._physx_interface.subscribe_physics_on_step_events( + # lambda step_dt, obj=weakref.proxy(callback.__self__): ( + # getattr(obj, callback_name)(step_dt) if SimulationManager._simulation_view_created else None + # ), + # pre_step=False, + # order=order, + # ) + # ) + # else: + # SimulationManager._callbacks[callback_id] = ( + # SimulationManager._physx_interface.subscribe_physics_on_step_events( + # lambda step_dt: callback(step_dt) if SimulationManager._simulation_view_created else None, + # pre_step=False, + # order=order, + # ) + # ) + # elif event == IsaacEvents.PRE_PHYSICS_STEP: + # if proxy_needed: + # SimulationManager._callbacks[callback_id] = ( + # SimulationManager._physx_interface.subscribe_physics_on_step_events( + # lambda step_dt, obj=weakref.proxy(callback.__self__): ( + # getattr(obj, callback_name)(step_dt) if SimulationManager._simulation_view_created else None + # ), + # pre_step=True, + # order=order, + # ) + # ) + # else: + # SimulationManager._callbacks[callback_id] = ( + # SimulationManager._physx_interface.subscribe_physics_on_step_events( + # lambda step_dt: callback(step_dt) if SimulationManager._simulation_view_created else None, + # pre_step=True, + # order=order, + # ) + # ) + # elif event == IsaacEvents.TIMELINE_STOP: + # if proxy_needed: + # SimulationManager._callbacks[ + # callback_id + # ] = SimulationManager._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( + # int(omni.timeline.TimelineEventType.STOP), + # lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event), + # order=order, + # name=name, + # ) + # else: + # SimulationManager._callbacks[ + # callback_id + # ] = 
SimulationManager._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( + # int(omni.timeline.TimelineEventType.STOP), callback, order=order, name=name + # ) + # else: + # raise Exception("{} event doesn't exist for callback registering".format(event)) + # SimulationManager._simulation_manager_interface.set_callback_iter(callback_id + 1) + # return callback_id + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing this as the callbacks handling are moved to the SimulationContext class. + #@classmethod + #def deregister_callback(cls, callback_id): + # """Deregisters a callback which was previously registered using register_callback. + + # Args: + # callback_id: The ID of the callback returned by register_callback to deregister. + # """ + # if callback_id in SimulationManager._callbacks: + # del SimulationManager._callbacks[callback_id] + # elif SimulationManager._simulation_manager_interface.deregister_callback(callback_id): + # return + # else: + # raise Exception("callback with id {} doesn't exist to be deregistered".format(callback_id)) + + # ------------------------------------------------------------------------------------------------------------------ + + @classmethod + def enable_usd_notice_handler(cls, flag): + """Enables or disables the usd notice handler. + + Args: + flag: Whether to enable or disable the handler. + """ + SimulationManager._simulation_manager_interface.enable_usd_notice_handler(flag) + return + + @classmethod + def enable_fabric_usd_notice_handler(cls, stage_id, flag): + """Enables or disables the fabric usd notice handler. + + Args: + stage_id: The stage ID to enable or disable the handler for. + flag: Whether to enable or disable the handler. + """ + SimulationManager._simulation_manager_interface.enable_fabric_usd_notice_handler(stage_id, flag) + return + + @classmethod + def is_fabric_usd_notice_handler_enabled(cls, stage_id): + """Checks if fabric usd notice handler is enabled. + + Args: + stage_id: The stage ID to check. + + Returns: + bool: True if fabric usd notice handler is enabled, otherwise False. + """ + return SimulationManager._simulation_manager_interface.is_fabric_usd_notice_handler_enabled(stage_id) + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing this as we will not use asynchronous stuff. + #@classmethod + #def assets_loading(cls) -> bool: + # """Checks if textures are loaded. + + # Returns: + # bool: True if textures are loading and not done yet, otherwise False. + # """ + # return not SimulationManager._assets_loaded + + # ------------------------------------------------------------------------------------------------------------------ + + # TODO: Removing these as the callbacks handling are moved to the SimulationContext class. + # Public API methods for enabling/disabling callbacks + #@classmethod + #def enable_warm_start_callback(cls, enable: bool = True) -> None: + # """Enable or disable the warm start callback. + + # Args: + # enable: Whether to enable the callback. + # """ + # cls._callbacks_enabled["warm_start"] = enable + # if enable: + # cls._setup_warm_start_callback() + # else: + # if cls._warm_start_callback is not None: + # cls._warm_start_callback = None + + #@classmethod + #def enable_on_stop_callback(cls, enable: bool = True) -> None: + # """Enable or disable the on stop callback. 
+ + # Args: + # enable: Whether to enable the callback. + # """ + # cls._callbacks_enabled["on_stop"] = enable + # if enable: + # cls._setup_on_stop_callback() + # else: + # if cls._on_stop_callback is not None: + # cls._on_stop_callback = None + + #@classmethod + #def enable_post_warm_start_callback(cls, enable: bool = True) -> None: + # """Enable or disable the post warm start callback. + + # Args: + # enable: Whether to enable the callback. + # """ + # cls._callbacks_enabled["post_warm_start"] = enable + # if enable: + # cls._setup_post_warm_start_callback() + # else: + # if cls._post_warm_start_callback is not None: + # cls._post_warm_start_callback = None + + #@classmethod + #def enable_stage_open_callback(cls, enable: bool = True) -> None: + # """Enable or disable the stage open callback. + # Note: This also enables/disables the assets loading and loaded callbacks. If disabled, assets_loading() will always return True. + + # Args: + # enable: Whether to enable the callback. + # """ + # cls._callbacks_enabled["stage_open"] = enable + # if enable: + # cls._setup_stage_open_callback() + # else: + # if cls._stage_open_callback is not None: + # cls._stage_open_callback = None + # # Reset assets loading and loaded callbacks + # cls._assets_loaded = True + # cls._assets_loading_callback = None + # cls._assets_loaded_callback = None + + ## Convenience methods for bulk operations + #@classmethod + #def enable_all_default_callbacks(cls, enable: bool = True) -> None: + # """Enable or disable all default callbacks. + # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. + + # Args: + # enable: Whether to enable all callbacks. + # """ + # cls.enable_warm_start_callback(enable) + # cls.enable_on_stop_callback(enable) + # cls.enable_post_warm_start_callback(enable) + # cls.enable_stage_open_callback(enable) + + #@classmethod + #def is_default_callback_enabled(cls, callback_name: str) -> bool: + # """Check if a specific default callback is enabled. + # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. + + # Args: + # callback_name: Name of the callback to check. + + # Returns: + # Whether the callback is enabled. + # """ + # return cls._callbacks_enabled.get(callback_name, False) + + #@classmethod + #def get_default_callback_status(cls) -> dict: + # """Get the status of all default callbacks. + # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. + + # Returns: + # Dictionary with callback names and their enabled status. + # """ + # return cls._callbacks_enabled.copy() diff --git a/source/isaaclab/isaaclab/utils/backend_utils.py b/source/isaaclab/isaaclab/utils/backend_utils.py new file mode 100644 index 00000000000..7ed5444efcd --- /dev/null +++ b/source/isaaclab/isaaclab/utils/backend_utils.py @@ -0,0 +1,79 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import importlib +import logging + +logger = logging.getLogger(__name__) + + +class FactoryBase: + """A generic factory class that dynamically loads backends.""" + + def __init_subclass__(cls, **kwargs): + """Initializes a new factory subclass.""" + super().__init_subclass__(**kwargs) + cls._registry = {} + # Determine the module subpath for dynamic loading. + # e.g., if factory is in 'isaaclab.assets.articulation.articulation', + # the subpath becomes 'assets.articulation'. 
+ module_parts = cls.__module__.split(".") + if module_parts[0] != "isaaclab": + raise ImportError(f"Factory class {cls.__name__} must be defined within the 'isaaclab' package.") + # The subpath is what comes between 'isaaclab' and the final module name. + cls._module_subpath = ".".join(module_parts[1:-1]) + + @classmethod + def register(cls, name: str, sub_class) -> None: + """Register a new implementation class.""" + if name in cls._registry and cls._registry[name] is not sub_class: + raise ValueError(f"Backend {name!r} already registered with a different class for factory {cls.__name__}.") + cls._registry[name] = sub_class + logger.info(f"Registered backend {name!r} for factory {cls.__name__}.") + + def __new__(cls, *args, **kwargs): + """Create a new instance of an implementation based on the backend.""" + + # TODO: Make the backend configurable. + backend = "physx" + + if cls == FactoryBase: + raise TypeError("FactoryBase cannot be instantiated directly. Please subclass it.") + + # If backend is not in registry, try to import it and register the class. + # This is done to only import the module once. + if backend not in cls._registry: + # Construct the module name from the backend and the determined subpath. + module_name = f"isaaclab_{backend}.{cls._module_subpath}" + try: + module = importlib.import_module(module_name) + module_class = getattr(module, cls.__name__) + # Manually register the class + cls.register(backend, module_class) + + except ImportError as e: + raise ValueError( + f"Could not import module for backend {backend!r} for factory {cls.__name__}. " + f"Attempted to import from '{module_name}'.\n" + f"Original error: {e}" + ) from e + + # Now check registry again. The import should have registered the class. + try: + impl = cls._registry[backend] + except KeyError: + available = list(cls.get_registry_keys()) + raise ValueError( + f"Unknown backend {backend!r} for {cls.__name__}. " + f"A module was found at '{module_name}', but it did not contain a class with the name {cls.__name__}.\n" + f"Currently available backends: {available}." + ) from None + # Return an instance of the chosen class. + return impl(*args, **kwargs) + + @classmethod + def get_registry_keys(cls) -> list[str]: + """Returns a list of registered backend names.""" + return list(cls._registry.keys()) diff --git a/source/isaaclab/isaaclab/utils/wrench_composer.py b/source/isaaclab/isaaclab/utils/wrench_composer.py index 8bd42f81e9e..c138e6acfc3 100644 --- a/source/isaaclab/isaaclab/utils/wrench_composer.py +++ b/source/isaaclab/isaaclab/utils/wrench_composer.py @@ -14,11 +14,11 @@ from isaaclab.utils.warp.kernels import add_forces_and_torques_at_position, set_forces_and_torques_at_position if TYPE_CHECKING: - from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection + from isaaclab.assets import BaseArticulation, BaseRigidObject, BaseRigidObjectCollection class WrenchComposer: - def __init__(self, asset: Articulation | RigidObject | RigidObjectCollection) -> None: + def __init__(self, asset: BaseArticulation | BaseRigidObject | BaseRigidObjectCollection) -> None: """Wrench composer. This class is used to compose forces and torques at the body's link frame. 
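Stepping back to the `FactoryBase` helper added in `backend_utils.py` above: the intended pattern, sketched here with a hypothetical `isaaclab.assets.widget` frontend module (the backend is currently hard-coded to `"physx"`), is that a frontend class subclasses the factory and instantiating it transparently resolves to the class of the same name in the backend package:

    # Hypothetical frontend module: isaaclab/assets/widget/widget.py
    from isaaclab.utils.backend_utils import FactoryBase


    class Widget(FactoryBase):
        """Frontend factory; __init_subclass__ records the module subpath 'assets.widget'."""


    # Instantiating the frontend imports isaaclab_physx.assets.widget, looks up a class named
    # `Widget` there, registers it under "physx", and returns an instance of that backend class.
    widget = Widget()
    print(Widget.get_registry_keys())  # -> ["physx"]

Because the registry is checked before importing, the backend module is imported only once per factory class.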
@@ -32,7 +32,7 @@ def __init__(self, asset: Articulation | RigidObject | RigidObjectCollection) -> if hasattr(asset, "num_bodies"): self.num_bodies = asset.num_bodies else: - self.num_bodies = asset.num_objects + raise ValueError(f"Unsupported asset type: {asset.__class__.__name__}") self.device = asset.device self._asset = asset self._active = False @@ -41,9 +41,6 @@ def __init__(self, asset: Articulation | RigidObject | RigidObjectCollection) -> if hasattr(self._asset.data, "body_link_pos_w") and hasattr(self._asset.data, "body_link_quat_w"): self._get_link_position_fn = lambda a=self._asset: a.data.body_link_pos_w[..., :3] self._get_link_quaternion_fn = lambda a=self._asset: a.data.body_link_quat_w[..., :4] - elif hasattr(self._asset.data, "object_link_pos_w") and hasattr(self._asset.data, "object_link_quat_w"): - self._get_link_position_fn = lambda a=self._asset: a.data.object_link_pos_w[..., :3] - self._get_link_quaternion_fn = lambda a=self._asset: a.data.object_link_quat_w[..., :4] else: raise ValueError(f"Unsupported asset type: {self._asset.__class__.__name__}") diff --git a/source/isaaclab/test/assets/test_deformable_object.py b/source/isaaclab/test/assets/test_deformable_object.py index 4726a274462..a593d31707d 100644 --- a/source/isaaclab/test/assets/test_deformable_object.py +++ b/source/isaaclab/test/assets/test_deformable_object.py @@ -26,7 +26,7 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils -from isaaclab.assets import DeformableObject, DeformableObjectCfg +from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg from isaaclab.sim import build_simulation_context diff --git a/source/isaaclab/test/assets/test_surface_gripper.py b/source/isaaclab/test/assets/test_surface_gripper.py index 86f112fdf98..be4c5ebec9e 100644 --- a/source/isaaclab/test/assets/test_surface_gripper.py +++ b/source/isaaclab/test/assets/test_surface_gripper.py @@ -26,9 +26,8 @@ ArticulationCfg, RigidObject, RigidObjectCfg, - SurfaceGripper, - SurfaceGripperCfg, ) +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR from isaaclab.utils.version import get_isaac_sim_version diff --git a/source/isaaclab_physx/config/extension.toml b/source/isaaclab_physx/config/extension.toml new file mode 100644 index 00000000000..7effa8970c0 --- /dev/null +++ b/source/isaaclab_physx/config/extension.toml @@ -0,0 +1,21 @@ +[package] + +# Note: Semantic Versioning is used: https://semver.org/ +version = "0.1.0" + +# Description +title = "PhysX simulation interfaces for IsaacLab core package" +description="Extension providing IsaacLab with PhysX specific abstractions." +readme = "docs/README.md" +repository = "https://github.com/isaac-sim/IsaacLab" +category = "robotics" +keywords = ["robotics", "simulation", "physx"] + +[dependencies] +"isaaclab" = {} + +[core] +reloadable = false + +[[python.module]] +name = "isaaclab_physx" diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst new file mode 100644 index 00000000000..2539cd9655d --- /dev/null +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -0,0 +1,10 @@ +Changelog +--------- + +0.1.0 (2026-01-26) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added PhysX simulation interfaces for IsaacLab core package. 
\ No newline at end of file diff --git a/source/isaaclab_physx/docs/README.md b/source/isaaclab_physx/docs/README.md new file mode 100644 index 00000000000..4ae826f1d6b --- /dev/null +++ b/source/isaaclab_physx/docs/README.md @@ -0,0 +1 @@ +# Isaac Lab PhysX Simulation interfaces diff --git a/source/isaaclab_physx/isaaclab_physx/__init__.py b/source/isaaclab_physx/isaaclab_physx/__init__.py new file mode 100644 index 00000000000..68a8a553287 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Package containing the PhysX simulation interfaces for IsaacLab core package.""" + +import os +import toml + +# Conveniences to other module directories via relative paths +ISAACLAB_PHYSX_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) +"""Path to the extension source directory.""" + +ISAACLAB_PHYSX_METADATA = toml.load(os.path.join(ISAACLAB_PHYSX_EXT_DIR, "config", "extension.toml")) +"""Extension metadata dictionary parsed from the extension.toml file.""" + +# Configure the module-level variables +__version__ = ISAACLAB_PHYSX_METADATA["package"]["version"] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py new file mode 100644 index 00000000000..eb529daa1d1 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py @@ -0,0 +1,59 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package for different assets, such as rigid objects and articulations. + +An asset is a physical object that can be spawned in the simulation. The class handles both +the spawning of the asset into the USD stage as well as initialization of necessary physics +handles to interact with the asset. + +Upon construction of the asset instance, the prim corresponding to the asset is spawned into the +USD stage if the spawn configuration is not None. The spawn configuration is defined in the +:attr:`AssetBaseCfg.spawn` attribute. In case the configured :attr:`AssetBaseCfg.prim_path` is +an expression, then the prim is spawned at all the matching paths. Otherwise, a single prim is +spawned at the configured path. For more information on the spawn configuration, see the +:mod:`isaaclab.sim.spawners` module. + +The asset class also registers callbacks for the stage play/stop events. These are used to +construct the physics handles for the asset as the physics engine is only available when the +stage is playing. Additionally, the class registers a callback for debug visualization of the +asset. This can be enabled by setting the :attr:`AssetBaseCfg.debug_vis` attribute to True. + +The asset class follows the following naming convention for its methods: + +* **set_xxx()**: These are used to only set the buffers into the :attr:`data` instance. However, they + do not write the data into the simulator. The writing of data only happens when the + :meth:`write_data_to_sim` method is called. +* **write_xxx_to_sim()**: These are used to set the buffers into the :attr:`data` instance and write + the corresponding data into the simulator as well. +* **update(dt)**: These are used to update the buffers in the :attr:`data` instance. 
This should + be called after a simulation step is performed. + +The main reason to separate the ``set`` and ``write`` operations is to provide flexibility to the +user when they need to perform a post-processing operation of the buffers before applying them +into the simulator. A common example for this is dealing with explicit actuator models where the +specified joint targets are not directly applied to the simulator but are instead used to compute +the corresponding actuator torques. +""" + +from .articulation import Articulation, ArticulationData +from .deformable_object import DeformableObject, DeformableObjectCfg, DeformableObjectData +from .rigid_object import RigidObject, RigidObjectData +from .rigid_object_collection import RigidObjectCollection, RigidObjectCollectionData +from .surface_gripper import SurfaceGripper, SurfaceGripperCfg + +__all__ = [ + "Articulation", + "ArticulationData", + "DeformableObject", + "DeformableObjectCfg", + "DeformableObjectData", + "RigidObject", + "RigidObjectData", + "RigidObjectCollection", + "RigidObjectCollectionData", + "SurfaceGripper", + "SurfaceGripperCfg", +] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py new file mode 100644 index 00000000000..1105cd27520 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid articulated assets.""" + +from .articulation import Articulation +from .articulation_data import ArticulationData + +__all__ = [ + "Articulation", + "ArticulationData", +] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py new file mode 100644 index 00000000000..2d0ae4be5eb --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -0,0 +1,2177 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Flag for pyright to ignore type errors in this file. 
+# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from prettytable import PrettyTable + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import PhysxSchema, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator +from isaaclab.utils.types import ArticulationActions +from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.wrench_composer import WrenchComposer + +from isaaclab.assets.articulation.base_articulation import BaseArticulation +from .articulation_data import ArticulationData + +if TYPE_CHECKING: + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + +# import logger +logger = logging.getLogger(__name__) + +class Articulation(BaseArticulation): + """An articulation asset class. + + An articulation is a collection of rigid bodies connected by joints. The joints can be either + fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. + However, the articulation class has currently been tested with revolute and prismatic joints. + The class supports both floating-base and fixed-base articulations. The type of articulation + is determined based on the root joint of the articulation. If the root joint is fixed, then + the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base + system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. + + For an asset to be considered an articulation, the root prim of the asset must have the + `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using + the reduced coordinate formulation. On playing the simulation, the physics engine parses the + articulation root prim and creates the corresponding articulation in the physics engine. The + articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. + + The articulation class also provides the functionality to augment the simulation of an articulated + system with custom actuator models. These models can either be explicit or implicit, as detailed in + the :mod:`isaaclab.actuators` module. The actuator models are specified using the + :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the + corresponding actuator models, when the simulation is played. + + During the simulation step, the articulation class first applies the actuator models to compute + the joint commands based on the user-specified targets. These joint commands are then applied + into the simulation. The joint commands can be either position, velocity, or effort commands. + As an example, the following snippet shows how this can be used for position commands: + + .. 
code-block:: python
+
+        # an example instance of the articulation class
+        my_articulation = Articulation(cfg)
+
+        # set joint position targets
+        my_articulation.set_joint_position_target(position)
+        # propagate the actuator models and apply the computed commands into the simulation
+        my_articulation.write_data_to_sim()
+
+        # step the simulation using the simulation context
+        sim_context.step()
+
+        # update the articulation state, where dt is the simulation time step
+        my_articulation.update(dt)
+
+    .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html
+
+    """
+
+    cfg: ArticulationCfg
+    """Configuration instance for the articulations."""
+
+    __backend_name__: str = "physx"
+    """The name of the backend for the articulation."""
+
+    actuators: dict
+    """Dictionary of actuator instances for the articulation.
+
+    The keys are the actuator names and the values are the actuator instances. The actuator instances
+    are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators`
+    attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function.
+    """
+
+    def __init__(self, cfg: ArticulationCfg):
+        """Initialize the articulation.
+
+        Args:
+            cfg: A configuration instance.
+        """
+        super().__init__(cfg)
+
+    """
+    Properties
+    """
+
+    @property
+    def data(self) -> ArticulationData:
+        return self._data
+
+    @property
+    def num_instances(self) -> int:
+        return self.root_view.count
+
+    @property
+    def is_fixed_base(self) -> bool:
+        """Whether the articulation is a fixed-base or floating-base system."""
+        return self.root_view.shared_metatype.fixed_base
+
+    @property
+    def num_joints(self) -> int:
+        """Number of joints in articulation."""
+        return self.root_view.shared_metatype.dof_count
+
+    @property
+    def num_fixed_tendons(self) -> int:
+        """Number of fixed tendons in articulation."""
+        return self.root_view.max_fixed_tendons
+
+    @property
+    def num_spatial_tendons(self) -> int:
+        """Number of spatial tendons in articulation."""
+        return self.root_view.max_spatial_tendons
+
+    @property
+    def num_bodies(self) -> int:
+        """Number of bodies in articulation."""
+        return self.root_view.shared_metatype.link_count
+
+    @property
+    def joint_names(self) -> list[str]:
+        """Ordered names of joints in articulation."""
+        return self.root_view.shared_metatype.dof_names
+
+    @property
+    def fixed_tendon_names(self) -> list[str]:
+        """Ordered names of fixed tendons in articulation."""
+        return self._fixed_tendon_names
+
+    @property
+    def spatial_tendon_names(self) -> list[str]:
+        """Ordered names of spatial tendons in articulation."""
+        return self._spatial_tendon_names
+
+    @property
+    def body_names(self) -> list[str]:
+        """Ordered names of bodies in articulation."""
+        return self.root_view.shared_metatype.link_names
+
+    @property
+    def root_view(self):
+        """Root view for the asset.
+
+        .. note::
+            Use this view with caution. It requires handling of tensors in a specific way.
+        """
+        return self._root_view
+
+    @property
+    def instantaneous_wrench_composer(self) -> WrenchComposer:
+        """Instantaneous wrench composer for the articulation."""
+        return self._instantaneous_wrench_composer
+
+    @property
+    def permanent_wrench_composer(self) -> WrenchComposer:
+        """Permanent wrench composer for the articulation."""
+        return self._permanent_wrench_composer
+
+    """
+    Operations.
+    """
+
+    def reset(self, env_ids: Sequence[int] | None = None):
+        """Reset the articulation. 
+ + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + # use ellipses object to skip initial indices. + if env_ids is None: + env_ids = slice(None) + # reset actuators + for actuator in self.actuators.values(): + actuator.reset(env_ids) + # reset external wrenches. + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self): + """Write external wrenches and joint commands to the simulation. + + If any explicit actuators are present, then the actuator models are used to compute the + joint commands. Otherwise, the joint commands are directly set into the simulation. + + .. note:: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_BODY_INDICES_WP, + env_ids=self._ALL_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_view.apply_forces_and_torques_at_position( + force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_view.apply_forces_and_torques_at_position( + force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + # apply actuator models + self._apply_actuator_model() + # write actions into simulation + self.root_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES) + # position and velocity targets only for implicit actuators + if self._has_implicit_actuators: + self.root_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES) + self.root_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES) + + def update(self, dt: float): + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. 
+ """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + def find_joints( + self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. + joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + if joint_subset is None: + joint_subset = self.joint_names + # find joints + return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order) + + def find_fixed_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint + names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means + all joints in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + # tendons follow the joint names they are attached to + tendon_subsets = self.fixed_tendon_names + # find tendons + return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + def find_spatial_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.spatial_tendon_names + # find tendons + return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + """ + Operations - State Writers. + """ + + def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. 
If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) + + def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # Note we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self.data.root_link_pose_w[env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._root_link_state_w.data is not None: + self.data.root_link_state_w[env_ids, :7] = self.data.root_link_pose_w[env_ids] + if self.data._root_state_w.data is not None: + self.data.root_state_w[env_ids, :7] = self.data.root_link_pose_w[env_ids] + + # convert root quaternion from wxyz to xyzw + root_poses_xyzw = self.data.root_link_pose_w.clone() + root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") + + # Need to invalidate the buffer to trigger the update with the new state. 
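+        # The body-state buffers below are lazily evaluated: resetting their timestamps to -1.0 marks
+        # them as stale so that the next access recomputes them using the newly written root pose.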
+        self.data._body_link_pose_w.timestamp = -1.0
+        self.data._body_com_pose_w.timestamp = -1.0
+        self.data._body_state_w.timestamp = -1.0
+        self.data._body_link_state_w.timestamp = -1.0
+        self.data._body_com_state_w.timestamp = -1.0
+
+        # set into simulation
+        self.root_view.set_root_transforms(root_poses_xyzw, indices=physx_env_ids)
+
+    def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root center of mass pose over selected environment indices into the simulation.
+
+        The root pose comprises the Cartesian position and the quaternion orientation in (w, x, y, z).
+        The orientation is the orientation of the principal axes of inertia.
+
+        Args:
+            root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        # resolve all indices
+        if env_ids is None:
+            local_env_ids = slice(None)
+        else:
+            local_env_ids = env_ids
+
+        # set into internal buffers
+        self.data.root_com_pose_w[local_env_ids] = root_pose.clone()
+        # update these buffers only if the user is using them. Otherwise this adds to overhead.
+        if self.data._root_com_state_w.data is not None:
+            self.data.root_com_state_w[local_env_ids, :7] = self.data.root_com_pose_w[local_env_ids]
+
+        # get CoM pose in link frame
+        com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :]
+        com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :]
+        # transform input CoM pose to link frame
+        root_link_pos, root_link_quat = math_utils.combine_frame_transforms(
+            root_pose[..., :3],
+            root_pose[..., 3:7],
+            math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b),
+            math_utils.quat_inv(com_quat_b),
+        )
+        root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1)
+
+        # write transformed pose in link frame to sim
+        self.write_root_link_pose_to_sim(root_pose=root_link_pose, env_ids=env_ids)
+
+    def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note::
+            This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids)
+
+    def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note::
+            This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        # resolve all indices
+        physx_env_ids = env_ids
+        if env_ids is None:
+            env_ids = slice(None)
+            physx_env_ids = self._ALL_INDICES
+
+        # Note we need to do this here since tensors are not set into simulation until step.
+        # set into internal buffers
+        self.data.root_com_vel_w[env_ids] = root_velocity.clone()
+        # update these buffers only if the user is using them. Otherwise this adds to overhead.
+        if self.data._root_com_state_w.data is not None:
+            self.data.root_com_state_w[env_ids, 7:] = self.data.root_com_vel_w[env_ids]
+        if self.data._root_state_w.data is not None:
+            self.data.root_state_w[env_ids, 7:] = self.data.root_com_vel_w[env_ids]
+        # make the acceleration zero to prevent reporting old values
+        self.data.body_acc_w[env_ids] = 0.0
+
+        # set into simulation
+        self.root_view.set_root_velocities(self.data.root_com_vel_w, indices=physx_env_ids)
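+
+    # Illustrative usage sketch (not part of this class): resetting the root state of selected
+    # environments through the writers above. It assumes an `Articulation` instance named `robot`
+    # and that the positions already include any environment origin offsets.
+    #
+    #   env_ids = torch.tensor([0, 1], device=robot.device)
+    #   default_state = robot.data.default_root_state[env_ids]
+    #   robot.write_root_link_pose_to_sim(default_state[:, :7], env_ids=env_ids)
+    #   robot.write_root_com_velocity_to_sim(default_state[:, 7:], env_ids=env_ids)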
+
+    def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root link velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+
+        .. note::
+            This sets the velocity of the root's frame rather than the root's center of mass.
+
+        Args:
+            root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        # resolve all indices
+        if env_ids is None:
+            local_env_ids = slice(None)
+        else:
+            local_env_ids = env_ids
+
+        # set into internal buffers
+        self.data.root_link_vel_w[local_env_ids] = root_velocity.clone()
+        # update these buffers only if the user is using them. Otherwise this adds to overhead.
+        if self.data._root_link_state_w.data is not None:
+            self.data.root_link_state_w[local_env_ids, 7:] = self.data.root_link_vel_w[local_env_ids]
+
+        # get CoM pose in link frame
+        quat = self.data.root_link_quat_w[local_env_ids]
+        com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :]
+        # transform input velocity to center of mass frame: v_com = v_link + omega x r_(link -> com)
+        root_com_velocity = root_velocity.clone()
+        root_com_velocity[:, :3] += torch.linalg.cross(
+            root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1
+        )
+
+        # write transformed velocity in CoM frame to sim
+        self.write_root_com_velocity_to_sim(root_velocity=root_com_velocity, env_ids=env_ids)
+
+    def write_joint_state_to_sim(
+        self,
+        position: torch.Tensor,
+        velocity: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | slice | None = None,
+    ):
+        """Write joint positions and velocities to the simulation.
+
+        Args:
+            position: Joint positions. Shape is (len(env_ids), len(joint_ids)).
+            velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)).
+            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
+            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
+        """
+        # set into simulation
+        self.write_joint_position_to_sim(position, joint_ids=joint_ids, env_ids=env_ids)
+        self.write_joint_velocity_to_sim(velocity, joint_ids=joint_ids, env_ids=env_ids)
+
+    def write_joint_position_to_sim(
+        self,
+        position: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | slice | None = None,
+    ):
+        """Write joint positions to the simulation.
+
+        Args:
+            position: Joint positions. Shape is (len(env_ids), len(joint_ids)).
+            joint_ids: The joint indices to set the targets for. Defaults to None (all joints).
+            env_ids: The environment indices to set the targets for. Defaults to None (all environments).
+ """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_pos[env_ids, joint_ids] = position + # Need to invalidate the buffer to trigger the update with the new root pose. + self.data._body_com_vel_w.timestamp = -1.0 + self.data._body_link_vel_w.timestamp = -1.0 + self.data._body_com_pose_b.timestamp = -1.0 + self.data._body_com_pose_w.timestamp = -1.0 + self.data._body_link_pose_w.timestamp = -1.0 + + self.data._body_state_w.timestamp = -1.0 + self.data._body_link_state_w.timestamp = -1.0 + self.data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_view.set_dof_positions(self.data.joint_pos, indices=physx_env_ids) + + def write_joint_velocity_to_sim( + self, + velocity: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | slice | None = None, + ): + """Write joint velocities to the simulation. + + Args: + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_vel[env_ids, joint_ids] = velocity + self.data._previous_joint_vel[env_ids, joint_ids] = velocity + self.data.joint_acc[env_ids, joint_ids] = 0.0 + # set into simulation + self.root_view.set_dof_velocities(self.data.joint_vel, indices=physx_env_ids) + + """ + Operations - Simulation Parameters Writers. + """ + + def write_joint_stiffness_to_sim( + self, + stiffness: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint stiffness into the simulation. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_stiffness[env_ids, joint_ids] = stiffness + # set into simulation + self.root_view.set_dof_stiffnesses(self.data.joint_stiffness.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_damping_to_sim( + self, + damping: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint damping into the simulation. + + Args: + damping: Joint damping. Shape is (len(env_ids), len(joint_ids)). 
+ joint_ids: The joint indices to set the damping for. Defaults to None (all joints). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_damping[env_ids, joint_ids] = damping + # set into simulation + self.root_view.set_dof_dampings(self.data.joint_damping.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_position_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ): + """Write joint position limits into the simulation. + + Args: + limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2). + joint_ids: The joint indices to set the limits for. Defaults to None (all joints). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + # Note This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_pos_limits[env_ids, joint_ids] = limits + # update default joint pos to stay within the new limits + if torch.any( + (self.data.default_joint_pos[env_ids, joint_ids] < limits[..., 0]) + | (self.data.default_joint_pos[env_ids, joint_ids] > limits[..., 1]) + ): + self.data.default_joint_pos[env_ids, joint_ids] = torch.clamp( + self.data.default_joint_pos[env_ids, joint_ids], limits[..., 0], limits[..., 1] + ) + violation_message = ( + "Some default joint positions are outside of the range of the new joint limits. Default joint positions" + " will be clamped to be within the new joint limits." + ) + if warn_limit_violation: + # warn level will show in console + logger.warning(violation_message) + else: + # info level is only written to log file + logger.info(violation_message) + # set into simulation + self.root_view.set_dof_limits(self.data.joint_pos_limits.cpu(), indices=physx_env_ids.cpu()) + + # compute the soft limits based on the joint limits + # TODO: Optimize this computation for only selected joints + # soft joint position limits (recommended not to be too close to limits). 
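+        # For example, with position limits of (-1.0, 1.0) rad and a soft limit factor of 0.9, the soft
+        # range computed below is (-0.9, 0.9): mean 0.0, range 2.0, half-width 0.5 * 2.0 * 0.9 = 0.9.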
+ joint_pos_mean = (self.data.joint_pos_limits[..., 0] + self.data.joint_pos_limits[..., 1]) / 2 + joint_pos_range = self.data.joint_pos_limits[..., 1] - self.data.joint_pos_limits[..., 0] + soft_limit_factor = self.cfg.soft_joint_pos_limit_factor + # add to data + self.data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor + self.data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor + + def write_joint_velocity_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint max velocity to the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + Args: + limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the max velocity for. Defaults to None (all joints). + env_ids: The environment indices to set the max velocity for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # move tensor to cpu if needed + if isinstance(limits, torch.Tensor): + limits = limits.to(self.device) + # set into internal buffers + self.data.joint_vel_limits[env_ids, joint_ids] = limits + # set into simulation + self.root_view.set_dof_max_velocities(self.data.joint_vel_limits.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_effort_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint effort limits into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + Args: + limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). + env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). + """ + # Note This function isn't setting the values for actuator models. 
(#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # move tensor to cpu if needed + if isinstance(limits, torch.Tensor): + limits = limits.to(self.device) + # set into internal buffers + self.data.joint_effort_limits[env_ids, joint_ids] = limits + # set into simulation + self.root_view.set_dof_max_forces(self.data.joint_effort_limits.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_armature_to_sim( + self, + armature: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint armature into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + Args: + armature: Joint armature. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). + env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_armature[env_ids, joint_ids] = armature + # set into simulation + self.root_view.set_dof_armatures(self.data.joint_armature.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_friction_coefficient_to_sim( + self, + joint_friction_coeff: torch.Tensor | float, + joint_dynamic_friction_coeff: torch.Tensor | float | None = None, + joint_viscous_friction_coeff: torch.Tensor | float | None = None, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + r"""Write joint friction coefficients into the simulation. + + For Isaac Sim versions below 5.0, only the static friction coefficient is set. + This limits the resisting force or torque up to a maximum proportional to the transmitted + spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. + + For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients + are set. The model combines Coulomb (static & dynamic) friction with a viscous term: + + - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. + - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. + - Viscous friction :math:`c_v` is a velocity-proportional resistive term. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (len(env_ids), len(joint_ids)). Scalars are broadcast to all selections. + joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. + Same shape as above. If None, the dynamic coefficient is not updated. + joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. + Same shape as above. If None, the viscous coefficient is not updated. + joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints). 
+ env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_friction_coeff[env_ids, joint_ids] = joint_friction_coeff + + # if dynamic or viscous friction coeffs are provided, set them too + if joint_dynamic_friction_coeff is not None: + self.data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff + if joint_viscous_friction_coeff is not None: + self.data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff + + # move the indices to cpu + physx_envs_ids_cpu = physx_env_ids.cpu() + + # set into simulation + if get_isaac_sim_version().major < 5: + self.root_view.set_dof_friction_coefficients( + self.data.joint_friction_coeff.cpu(), indices=physx_envs_ids_cpu + ) + else: + friction_props = self.root_view.get_dof_friction_properties() + friction_props[physx_envs_ids_cpu, :, 0] = self.data.joint_friction_coeff[physx_envs_ids_cpu, :].cpu() + + # only set dynamic and viscous friction if provided + if joint_dynamic_friction_coeff is not None: + friction_props[physx_envs_ids_cpu, :, 1] = self.data.joint_dynamic_friction_coeff[ + physx_envs_ids_cpu, : + ].cpu() + + # only set viscous friction if provided + if joint_viscous_friction_coeff is not None: + friction_props[physx_envs_ids_cpu, :, 2] = self.data.joint_viscous_friction_coeff[ + physx_envs_ids_cpu, : + ].cpu() + + self.root_view.set_dof_friction_properties(friction_props, indices=physx_envs_ids_cpu) + + def write_joint_dynamic_friction_coefficient_to_sim( + self, + joint_dynamic_friction_coeff: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint dynamic friction coefficient into the simulation. + + Args: + joint_dynamic_friction_coeff: Joint dynamic friction coefficient. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the dynamic friction coefficient for. Defaults to None (all joints). + env_ids: The environment indices to set the dynamic friction coefficient for. Defaults to None (all environments). 
+ """ + if get_isaac_sim_version().major < 5: + logger.warning("Setting joint dynamic friction coefficients are not supported in Isaac Sim < 5.0") + return + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff + # set into simulation + friction_props = self.root_view.get_dof_friction_properties() + friction_props[physx_env_ids.cpu(), :, 1] = self.data.joint_dynamic_friction_coeff[physx_env_ids, :].cpu() + self.root_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) + + def write_joint_viscous_friction_coefficient_to_sim( + self, + joint_viscous_friction_coeff: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint viscous friction coefficient into the simulation. + + Args: + joint_viscous_friction_coeff: Joint viscous friction coefficient. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the viscous friction coefficient for. Defaults to None (all joints). + env_ids: The environment indices to set the viscous friction coefficient for. Defaults to None (all environments). + """ + if get_isaac_sim_version().major < 5: + logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") + return + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff + # set into simulation + friction_props = self.root_view.get_dof_friction_properties() + friction_props[physx_env_ids.cpu(), :, 2] = self.data.joint_viscous_friction_coeff[physx_env_ids, :].cpu() + self.root_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) + + """ + Operations - Setters. + """ + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set masses of all bodies in the simulation world frame. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). 
+ """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_mass[env_ids, body_ids] = masses + # set into simulation + self.root_view.set_masses(self.data.body_mass.cpu(), indices=physx_env_ids.cpu()) + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set center of mass pose of all bodies in the simulation world frame. + + Args: + coms: Center of mass pose of all bodies. Shape is (num_instances, num_bodies, 7). + body_ids: The body indices to set the center of mass pose for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass pose for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_com_pose_b[env_ids, body_ids] = coms + # set into simulation + self.root_view.set_coms(self.data.body_com_pose_b.cpu(), indices=physx_env_ids.cpu()) + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set inertias of all bodies in the simulation world frame. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 9). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_inertia[env_ids, body_ids] = inertias + # set into simulation + self.root_view.set_inertias(self.data.body_inertia.cpu(), indices=physx_env_ids.cpu()) + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ): + """Set external force and torque to apply on the asset's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. 
code-block:: python
+
+                # example of disabling external wrench
+                asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3))
+
+        .. caution::
+            If the function is called consecutively with different values for ``is_global``, then all the
+            external wrenches will be applied in the frame specified by the last call.
+
+            .. code-block:: python
+
+                # example of setting external wrench in the global frame
+                asset.set_external_force_and_torque(
+                    forces=torch.ones(1, 1, 3), torques=torch.zeros(1, 1, 3), env_ids=[0], is_global=True
+                )
+                # example of setting external wrench in the link frame
+                asset.set_external_force_and_torque(
+                    forces=torch.ones(1, 1, 3), torques=torch.zeros(1, 1, 3), env_ids=[1], is_global=False
+                )
+                # Both environments will have the external wrenches applied in the link frame
+
+        .. note::
+            This function does not apply the external wrench to the simulation. It only fills the buffers with
+            the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function
+            right before the simulation step.
+
+        Args:
+            forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3).
+            torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3).
+            positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). Defaults to None.
+            body_ids: Body indices to apply external wrench to. Defaults to None (all bodies).
+            env_ids: Environment indices to apply external wrench to. Defaults to None (all instances).
+            is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False,
+                the external wrench is applied in the link frame of the articulations' bodies.
+        """
+        logger.warning(
+            "The function 'set_external_force_and_torque' will be deprecated in a future release. Please"
+            " use 'permanent_wrench_composer.set_forces_and_torques' instead."
+        )
+        if forces is None and torques is None:
+            logger.warning("No forces or torques provided. No permanent external wrench will be applied.")
+
+        # resolve all indices
+        # -- env_ids
+        if env_ids is None:
+            env_ids = self._ALL_INDICES_WP
+        elif not isinstance(env_ids, torch.Tensor):
+            env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device)
+        else:
+            env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32)
+        # -- body_ids
+        if body_ids is None:
+            body_ids = self._ALL_BODY_INDICES_WP
+        elif isinstance(body_ids, slice):
+            body_ids = wp.from_torch(
+                torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32
+            )
+        elif not isinstance(body_ids, torch.Tensor):
+            body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device)
+        else:
+            body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32)
+
+        # Write to wrench composer
+        self._permanent_wrench_composer.set_forces_and_torques(
+            forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None,
+            torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None,
+            positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None,
+            body_ids=body_ids,
+            env_ids=env_ids,
+            is_global=is_global,
+        )
+
+    def set_joint_position_target(
+        self,
+        target: torch.Tensor,
+        joint_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ):
+        """Set joint position targets into internal buffers.
+
+        This function does not apply the joint targets to the simulation.
It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self.data.joint_pos_target[env_ids, joint_ids] = target + + def set_joint_velocity_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set joint velocity targets into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self.data.joint_vel_target[env_ids, joint_ids] = target + + def set_joint_effort_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set joint efforts into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self.data.joint_effort_target[env_ids, joint_ids] = target + + """ + Operations - Tendons. + """ + + def set_fixed_tendon_stiffness( + self, + stiffness: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim` method. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). 
+            env_ids: The environment indices to set the stiffness for. Defaults to None (all environments).
+        """
+        # resolve indices
+        if env_ids is None:
+            env_ids = slice(None)
+        if fixed_tendon_ids is None:
+            fixed_tendon_ids = slice(None)
+        if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+            env_ids = env_ids[:, None]
+        # set stiffness
+        self.data.fixed_tendon_stiffness[env_ids, fixed_tendon_ids] = stiffness
+
+    def set_fixed_tendon_damping(
+        self,
+        damping: torch.Tensor,
+        fixed_tendon_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ):
+        """Set fixed tendon damping into internal buffers.
+
+        This function does not apply the tendon damping to the simulation. It only fills the buffers with
+        the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function.
+
+        Args:
+            damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)).
+            fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons).
+            env_ids: The environment indices to set the damping for. Defaults to None (all environments).
+        """
+        # resolve indices
+        if env_ids is None:
+            env_ids = slice(None)
+        if fixed_tendon_ids is None:
+            fixed_tendon_ids = slice(None)
+        if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+            env_ids = env_ids[:, None]
+        # set damping
+        self.data.fixed_tendon_damping[env_ids, fixed_tendon_ids] = damping
+
+    def set_fixed_tendon_limit_stiffness(
+        self,
+        limit_stiffness: torch.Tensor,
+        fixed_tendon_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ):
+        """Set fixed tendon limit stiffness into internal buffers.
+
+        This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with
+        the desired values. To apply the tendon limit stiffness, call the
+        :meth:`write_fixed_tendon_properties_to_sim` method.
+
+        Args:
+            limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)).
+            fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons).
+            env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments).
+        """
+        # resolve indices
+        if env_ids is None:
+            env_ids = slice(None)
+        if fixed_tendon_ids is None:
+            fixed_tendon_ids = slice(None)
+        if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+            env_ids = env_ids[:, None]
+        # set limit_stiffness
+        self.data.fixed_tendon_limit_stiffness[env_ids, fixed_tendon_ids] = limit_stiffness
+
+    def set_fixed_tendon_position_limit(
+        self,
+        limit: torch.Tensor,
+        fixed_tendon_ids: Sequence[int] | slice | None = None,
+        env_ids: Sequence[int] | None = None,
+    ):
+        """Set fixed tendon position limits into internal buffers.
+
+        This function does not apply the tendon position limits to the simulation. It only fills the buffers with
+        the desired values. To apply the tendon position limits, call the :meth:`write_fixed_tendon_properties_to_sim` function.
+
+        Args:
+            limit: Fixed tendon position limits. Shape is (len(env_ids), len(fixed_tendon_ids)).
+            fixed_tendon_ids: The tendon indices to set the position limits for. Defaults to None (all fixed tendons).
+            env_ids: The environment indices to set the position limits for. Defaults to None (all environments).
+        """
+        # resolve indices
+        if env_ids is None:
+            env_ids = slice(None)
+        if fixed_tendon_ids is None:
+            fixed_tendon_ids = slice(None)
+        if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+            env_ids = env_ids[:, None]
+        # set limit
+        self.data.fixed_tendon_pos_limits[env_ids, fixed_tendon_ids] = limit
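+
+    # Illustrative usage sketch (not part of this class): the fixed tendon setters only stage values
+    # in the data buffers; they take effect once `write_fixed_tendon_properties_to_sim` is called.
+    # It assumes an `Articulation` instance named `robot` that has fixed tendons.
+    #
+    #   stiffness = torch.full((robot.num_instances, robot.num_fixed_tendons), 50.0, device=robot.device)
+    #   robot.set_fixed_tendon_stiffness(stiffness)
+    #   robot.write_fixed_tendon_properties_to_sim()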
+ """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set limit + self.data.fixed_tendon_pos_limits[env_ids, fixed_tendon_ids] = limit + + def set_fixed_tendon_rest_length( + self, + rest_length: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon rest length efforts into internal buffers. + + This function does not apply the tendon rest length to the simulation. It only fills the buffers with + the desired values. To apply the tendon rest length, call the + :meth:`write_fixed_tendon_properties_to_sim` method. + + Args: + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the rest length for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set rest_length + self.data.fixed_tendon_rest_length[env_ids, fixed_tendon_ids] = rest_length + + def set_fixed_tendon_offset( + self, + offset: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon offset efforts into internal buffers. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the offset for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set offset + self.data.fixed_tendon_offset[env_ids, fixed_tendon_ids] = offset + + def write_fixed_tendon_properties_to_sim( + self, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write fixed tendon properties into the simulation. + + Args: + fixed_tendon_ids: The fixed tendon indices to set the limits for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). 
+ """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + physx_env_ids = self._ALL_INDICES + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + + # set into simulation + self.root_view.set_fixed_tendon_properties( + self.data.fixed_tendon_stiffness, + self.data.fixed_tendon_damping, + self.data.fixed_tendon_limit_stiffness, + self.data.fixed_tendon_pos_limits, + self.data.fixed_tendon_rest_length, + self.data.fixed_tendon_offset, + indices=physx_env_ids, + ) + + def set_spatial_tendon_stiffness( + self, + stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set stiffness + self.data.spatial_tendon_stiffness[env_ids, spatial_tendon_ids] = stiffness + + def set_spatial_tendon_damping( + self, + damping: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon damping into internal buffers. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` function. + + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set damping + self.data.spatial_tendon_damping[env_ids, spatial_tendon_ids] = damping + + def set_spatial_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon limit stiffness into internal buffers. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None, + which means all spatial tendons. + env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). 
+ """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set limit stiffness + self.data.spatial_tendon_limit_stiffness[env_ids, spatial_tendon_ids] = limit_stiffness + + def set_spatial_tendon_offset( + self, + offset: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon offset efforts into internal buffers. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the offset for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set offset + self.data.spatial_tendon_offset[env_ids, spatial_tendon_ids] = offset + + def write_spatial_tendon_properties_to_sim( + self, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write spatial tendon properties into the simulation. + + Args: + spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None, + which means all spatial tendons. + env_ids: The environment indices to set the properties for. Defaults to None, + which means all environments. + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + physx_env_ids = self._ALL_INDICES + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + + # set into simulation + self.root_view.set_spatial_tendon_properties( + self.data.spatial_tendon_stiffness, + self.data.spatial_tendon_damping, + self.data.spatial_tendon_limit_stiffness, + self.data.spatial_tendon_offset, + indices=physx_env_ids, + ) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + + if self.cfg.articulation_root_prim_path is not None: + # The articulation root prim path is specified explicitly, so we can just use this. + root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path + else: + # No articulation root prim path was specified, so we need to search + # for it. We search for this in the first environment and then + # create a regex that matches all environments. + first_env_matching_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + # Find all articulation root prims in the first environment. 
+ first_env_root_prims = sim_utils.get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + # Now we convert the found articulation root from the first + # environment back into a regex that matches all environments. + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path + + # -- articulation + self._root_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) + + # check if the articulation was created + if self.root_view._backend is None: + raise RuntimeError(f"Failed to create articulation at: {root_prim_path_expr}. Please check PhysX logs.") + + # log information about the articulation + logger.info(f"Articulation initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") + logger.info(f"Is fixed root: {self.is_fixed_base}") + logger.info(f"Number of bodies: {self.num_bodies}") + logger.info(f"Body names: {self.body_names}") + logger.info(f"Number of joints: {self.num_joints}") + logger.info(f"Joint names: {self.joint_names}") + logger.info(f"Number of fixed tendons: {self.num_fixed_tendons}") + + # container for data access + self._data = ArticulationData(self.root_view, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + self._process_actuators_cfg() + self._process_tendons() + # validate configuration + self._validate_cfg() + # update the robot data + self.update(0.0) + # log joint information + self._log_articulation_info() + # Let the articulation data know that it is fully instantiated and ready to use. + self.data.is_primed = True + + def _create_buffers(self): + self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) + self._ALL_BODY_INDICES = torch.arange(self.num_bodies, dtype=torch.long, device=self.device) + self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) + self._ALL_BODY_INDICES_WP = wp.from_torch(self._ALL_BODY_INDICES.to(torch.int32), dtype=wp.int32) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # asset named data + self.data.joint_names = self.joint_names + self.data.body_names = self.body_names + # tendon names are set in _process_tendons function + + # -- joint commands (sent to the simulation after actuator processing) + self._joint_pos_target_sim = torch.zeros_like(self.data.joint_pos_target) + self._joint_vel_target_sim = torch.zeros_like(self.data.joint_pos_target) + self._joint_effort_target_sim = torch.zeros_like(self.data.joint_pos_target) + + # soft joint position limits (recommended not to be too close to limits). 
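+        # Note: these soft limits are recomputed in :meth:`write_joint_position_limit_to_sim` whenever
+        # new joint position limits are written, so they remain consistent with the simulation limits.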
+ joint_pos_mean = (self.data.joint_pos_limits[..., 0] + self.data.joint_pos_limits[..., 1]) / 2 + joint_pos_range = self.data.joint_pos_limits[..., 1] - self.data.joint_pos_limits[..., 0] + soft_limit_factor = self.cfg.soft_joint_pos_limit_factor + # add to data + self.data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor + self.data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- root state + # Note we cast to tuple to avoid torch/numpy type mismatch. + default_root_state = ( + tuple(self.cfg.init_state.pos) + + tuple(self.cfg.init_state.rot) + + tuple(self.cfg.init_state.lin_vel) + + tuple(self.cfg.init_state.ang_vel) + ) + default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) + self.data.default_root_state = default_root_state.repeat(self.num_instances, 1) + + # -- joint state + # joint pos + indices_list, _, values_list = string_utils.resolve_matching_names_values( + self.cfg.init_state.joint_pos, self.joint_names + ) + self.data.default_joint_pos[:, indices_list] = torch.tensor(values_list, device=self.device) + # joint vel + indices_list, _, values_list = string_utils.resolve_matching_names_values( + self.cfg.init_state.joint_vel, self.joint_names + ) + self.data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + self._root_view = None + + """ + Internal helpers -- Actuators. + """ + + def _process_actuators_cfg(self): + """Process and apply articulation joint properties.""" + # create actuators + self.actuators = dict() + # flag for implicit actuators + # if this is false, we by-pass certain checks when doing actuator-related operations + self._has_implicit_actuators = False + + # iterate over all actuator configurations + for actuator_name, actuator_cfg in self.cfg.actuators.items(): + # type annotation for type checkers + actuator_cfg: ActuatorBaseCfg + # create actuator group + joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) + # check if any joints are found + if len(joint_names) == 0: + raise ValueError( + f"No joints found for actuator group: {actuator_name} with joint name expression:" + f" {actuator_cfg.joint_names_expr}." 
+ ) + # resolve joint indices + # we pass a slice if all joints are selected to avoid indexing overhead + if len(joint_names) == self.num_joints: + joint_ids = slice(None) + else: + joint_ids = torch.tensor(joint_ids, device=self.device) + # create actuator collection + # note: for efficiency avoid indexing when over all indices + actuator: ActuatorBase = actuator_cfg.class_type( + cfg=actuator_cfg, + joint_names=joint_names, + joint_ids=joint_ids, + num_envs=self.num_instances, + device=self.device, + stiffness=self._data.joint_stiffness[:, joint_ids], + damping=self._data.joint_damping[:, joint_ids], + armature=self._data.joint_armature[:, joint_ids], + friction=self._data.joint_friction_coeff[:, joint_ids], + dynamic_friction=self._data.joint_dynamic_friction_coeff[:, joint_ids], + viscous_friction=self._data.joint_viscous_friction_coeff[:, joint_ids], + effort_limit=self._data.joint_effort_limits[:, joint_ids].clone(), + velocity_limit=self._data.joint_vel_limits[:, joint_ids], + ) + # log information on actuator groups + model_type = "implicit" if actuator.is_implicit_model else "explicit" + logger.info( + f"Actuator collection: {actuator_name} with model '{actuator_cfg.class_type.__name__}'" + f" (type: {model_type}) and joint names: {joint_names} [{joint_ids}]." + ) + # store actuator group + self.actuators[actuator_name] = actuator + # set the passed gains and limits into the simulation + if isinstance(actuator, ImplicitActuator): + self._has_implicit_actuators = True + # the gains and limits are set into the simulation since actuator model is implicit + self.write_joint_stiffness_to_sim(actuator.stiffness, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim(actuator.damping, joint_ids=actuator.joint_indices) + else: + # the gains and limits are processed by the actuator model + # we set gains to zero, and torque limit to a high value in simulation to avoid any interference + self.write_joint_stiffness_to_sim(0.0, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim(0.0, joint_ids=actuator.joint_indices) + + # Set common properties into the simulation + self.write_joint_effort_limit_to_sim(actuator.effort_limit_sim, joint_ids=actuator.joint_indices) + self.write_joint_velocity_limit_to_sim(actuator.velocity_limit_sim, joint_ids=actuator.joint_indices) + self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices) + self.write_joint_friction_coefficient_to_sim(actuator.friction, joint_ids=actuator.joint_indices) + if get_isaac_sim_version().major >= 5: + self.write_joint_dynamic_friction_coefficient_to_sim( + actuator.dynamic_friction, joint_ids=actuator.joint_indices + ) + self.write_joint_viscous_friction_coefficient_to_sim( + actuator.viscous_friction, joint_ids=actuator.joint_indices + ) + + # Store the configured values from the actuator model + # note: this is the value configured in the actuator model (for implicit and explicit actuators) + self._data.joint_stiffness[:, actuator.joint_indices] = actuator.stiffness + self._data.joint_damping[:, actuator.joint_indices] = actuator.damping + self._data.joint_armature[:, actuator.joint_indices] = actuator.armature + self._data.joint_friction_coeff[:, actuator.joint_indices] = actuator.friction + if get_isaac_sim_version().major >= 5: + self._data.joint_dynamic_friction_coeff[:, actuator.joint_indices] = actuator.dynamic_friction + self._data.joint_viscous_friction_coeff[:, actuator.joint_indices] = actuator.viscous_friction + + # perform some sanity checks to ensure actuators 
are prepared correctly + total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) + if total_act_joints != (self.num_joints - self.num_fixed_tendons): + logger.warning( + "Not all actuators are configured! Total number of actuated joints not equal to number of" + f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." + ) + + if self.cfg.actuator_value_resolution_debug_print: + t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActutatorCfg Value", "Applied"]) + for actuator_group, actuator in self.actuators.items(): + group_count = 0 + for property, resolution_details in actuator.joint_property_resolution_table.items(): + for prop_idx, resolution_detail in enumerate(resolution_details): + actuator_group_str = actuator_group if group_count == 0 else "" + property_str = property if prop_idx == 0 else "" + fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail] + t.add_row([actuator_group_str, property_str, *fmt]) + group_count += 1 + logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}") + + def _process_tendons(self): + """Process fixed and spatial tendons.""" + # create a list to store the fixed tendon names + self._fixed_tendon_names = list() + self._spatial_tendon_names = list() + # parse fixed tendons properties if they exist + if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0: + joint_paths = self.root_view.dof_paths[0] + + # iterate over all joints to find tendons attached to them + for j in range(self.num_joints): + usd_joint_path = joint_paths[j] + # check whether joint has tendons - tendon name follows the joint name it is attached to + joint = UsdPhysics.Joint.Get(self.stage, usd_joint_path) + if joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAxisRootAPI): + joint_name = usd_joint_path.split("/")[-1] + self._fixed_tendon_names.append(joint_name) + elif joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAttachmentRootAPI) or joint.GetPrim().HasAPI( + PhysxSchema.PhysxTendonAttachmentLeafAPI + ): + joint_name = usd_joint_path.split("/")[-1] + self._spatial_tendon_names.append(joint_name) + + # store the fixed tendon names + self._data.fixed_tendon_names = self._fixed_tendon_names + self._data.spatial_tendon_names = self._spatial_tendon_names + + def _apply_actuator_model(self): + """Processes joint commands for the articulation by forwarding them to the actuators. + + The actions are first processed using actuator models. Depending on the robot configuration, + the actuator models compute the joint level simulation commands and sets them into the PhysX buffers. 
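The docstring above summarizes the actuator pipeline: user targets are turned into joint-level commands before being written to the simulation. A minimal, schematic sketch of that idea, assuming a simple PD-plus-clipping model (this is illustrative only and does not mirror the actual ActuatorBase interface):

import torch

def pd_effort(pos_target, vel_target, joint_pos, joint_vel, kp, kd, effort_limit):
    # raw effort from tracking errors (analogous to computed_torque)
    computed = kp * (pos_target - joint_pos) + kd * (vel_target - joint_vel)
    # clipped effort that would be sent to the solver (analogous to applied_torque)
    applied = torch.clamp(computed, -effort_limit, effort_limit)
    return computed, applied

computed, applied = pd_effort(
    torch.tensor([[0.5]]), torch.zeros(1, 1), torch.zeros(1, 1), torch.zeros(1, 1),
    kp=100.0, kd=5.0, effort_limit=20.0,
)
# computed -> 50.0 (raw), applied -> 20.0 (saturated at the effort limit)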
+ """ + # process actions per group + for actuator in self.actuators.values(): + # prepare input for actuator model based on cached data + # TODO : A tensor dict would be nice to do the indexing of all tensors together + control_action = ArticulationActions( + joint_positions=self._data.joint_pos_target[:, actuator.joint_indices], + joint_velocities=self._data.joint_vel_target[:, actuator.joint_indices], + joint_efforts=self._data.joint_effort_target[:, actuator.joint_indices], + joint_indices=actuator.joint_indices, + ) + # compute joint command from the actuator model + control_action = actuator.compute( + control_action, + joint_pos=self._data.joint_pos[:, actuator.joint_indices], + joint_vel=self._data.joint_vel[:, actuator.joint_indices], + ) + # update targets (these are set into the simulation) + if control_action.joint_positions is not None: + self._joint_pos_target_sim[:, actuator.joint_indices] = control_action.joint_positions + if control_action.joint_velocities is not None: + self._joint_vel_target_sim[:, actuator.joint_indices] = control_action.joint_velocities + if control_action.joint_efforts is not None: + self._joint_effort_target_sim[:, actuator.joint_indices] = control_action.joint_efforts + # update state of the actuator model + # -- torques + self._data.computed_torque[:, actuator.joint_indices] = actuator.computed_effort + self._data.applied_torque[:, actuator.joint_indices] = actuator.applied_effort + # -- actuator data + self._data.soft_joint_vel_limits[:, actuator.joint_indices] = actuator.velocity_limit + # TODO: find a cleaner way to handle gear ratio. Only needed for variable gear ratio actuators. + if hasattr(actuator, "gear_ratio"): + self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio + + + """ + Internal helpers -- Debugging. + """ + + def _validate_cfg(self): + """Validate the configuration after processing. + + .. note:: + This function should be called only after the configuration has been processed and the buffers have been + created. Otherwise, some settings that are altered during processing may not be validated. + For instance, the actuator models may change the joint max velocity limits. 
+ """ + # check that the default values are within the limits + joint_pos_limits = self.root_view.get_dof_limits()[0].to(self.device) + out_of_range = self._data.default_joint_pos[0] < joint_pos_limits[:, 0] + out_of_range |= self._data.default_joint_pos[0] > joint_pos_limits[:, 1] + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + # throw error if any of the default joint positions are out of the limits + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default positions out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = joint_pos_limits[idx] + joint_pos = self.data.default_joint_pos[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + # check that the default joint velocities are within the limits + joint_max_vel = self.root_view.get_dof_max_velocities()[0].to(self.device) + out_of_range = torch.abs(self._data.default_joint_vel[0]) > joint_max_vel + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default velocities out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] + joint_vel = self.data.default_joint_vel[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + def _log_articulation_info(self): + """Log information about the articulation. + + .. note:: We purposefully read the values from the simulator to ensure that the values are configured as expected. 
+ """ + + # define custom formatters for large numbers and limit ranges + def format_large_number(_, v: float) -> str: + """Format large numbers using scientific notation.""" + if abs(v) >= 1e3: + return f"{v:.1e}" + else: + return f"{v:.3f}" + + def format_limits(_, v: tuple[float, float]) -> str: + """Format limit ranges using scientific notation.""" + if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: + return f"[{v[0]:.1e}, {v[1]:.1e}]" + else: + return f"[{v[0]:.3f}, {v[1]:.3f}]" + + # read out all joint parameters from simulation + # -- gains + stiffnesses = self.root_view.get_dof_stiffnesses()[0].tolist() + dampings = self.root_view.get_dof_dampings()[0].tolist() + # -- properties + armatures = self.root_view.get_dof_armatures()[0].tolist() + if get_isaac_sim_version().major < 5: + static_frictions = self.root_view.get_dof_friction_coefficients()[0].tolist() + else: + friction_props = self.root_view.get_dof_friction_properties() + static_frictions = friction_props[:, :, 0][0].tolist() + dynamic_frictions = friction_props[:, :, 1][0].tolist() + viscous_frictions = friction_props[:, :, 2][0].tolist() + # -- limits + position_limits = self.root_view.get_dof_limits()[0].tolist() + velocity_limits = self.root_view.get_dof_max_velocities()[0].tolist() + effort_limits = self.root_view.get_dof_max_forces()[0].tolist() + # create table for term information + joint_table = PrettyTable() + joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" + # build field names based on Isaac Sim version + field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] + if get_isaac_sim_version().major < 5: + field_names.append("Static Friction") + else: + field_names.extend(["Static Friction", "Dynamic Friction", "Viscous Friction"]) + field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) + joint_table.field_names = field_names + + # apply custom formatters to numeric columns + joint_table.custom_format["Stiffness"] = format_large_number + joint_table.custom_format["Damping"] = format_large_number + joint_table.custom_format["Armature"] = format_large_number + joint_table.custom_format["Static Friction"] = format_large_number + if get_isaac_sim_version().major >= 5: + joint_table.custom_format["Dynamic Friction"] = format_large_number + joint_table.custom_format["Viscous Friction"] = format_large_number + joint_table.custom_format["Position Limits"] = format_limits + joint_table.custom_format["Velocity Limits"] = format_large_number + joint_table.custom_format["Effort Limits"] = format_large_number + + # set alignment of table columns + joint_table.align["Name"] = "l" + # add info on each term + for index, name in enumerate(self.joint_names): + # build row data based on Isaac Sim version + row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] + if get_isaac_sim_version().major < 5: + row_data.append(static_frictions[index]) + else: + row_data.extend([static_frictions[index], dynamic_frictions[index], viscous_frictions[index]]) + row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) + # add row to table + joint_table.add_row(row_data) + # convert table to string + logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) + + # read out all fixed tendon parameters from simulation + if self.num_fixed_tendons > 0: + # -- gains + ft_stiffnesses = self.root_view.get_fixed_tendon_stiffnesses()[0].tolist() + ft_dampings = 
self.root_view.get_fixed_tendon_dampings()[0].tolist() + # -- limits + ft_limit_stiffnesses = self.root_view.get_fixed_tendon_limit_stiffnesses()[0].tolist() + ft_limits = self.root_view.get_fixed_tendon_limits()[0].tolist() + ft_rest_lengths = self.root_view.get_fixed_tendon_rest_lengths()[0].tolist() + ft_offsets = self.root_view.get_fixed_tendon_offsets()[0].tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Limits", + "Rest Length", + "Offset", + ] + tendon_table.float_format = ".3" + + # apply custom formatters to tendon table columns + tendon_table.custom_format["Stiffness"] = format_large_number + tendon_table.custom_format["Damping"] = format_large_number + tendon_table.custom_format["Limit Stiffness"] = format_large_number + tendon_table.custom_format["Limits"] = format_limits + tendon_table.custom_format["Rest Length"] = format_large_number + tendon_table.custom_format["Offset"] = format_large_number + + # add info on each term + for index in range(self.num_fixed_tendons): + tendon_table.add_row( + [ + index, + ft_stiffnesses[index], + ft_dampings[index], + ft_limit_stiffnesses[index], + ft_limits[index], + ft_rest_lengths[index], + ft_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + if self.num_spatial_tendons > 0: + # -- gains + st_stiffnesses = self.root_view.get_spatial_tendon_stiffnesses()[0].tolist() + st_dampings = self.root_view.get_spatial_tendon_dampings()[0].tolist() + # -- limits + st_limit_stiffnesses = self.root_view.get_spatial_tendon_limit_stiffnesses()[0].tolist() + st_offsets = self.root_view.get_spatial_tendon_offsets()[0].tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Offset", + ] + tendon_table.float_format = ".3" + # add info on each term + for index in range(self.num_spatial_tendons): + tendon_table.add_row( + [ + index, + st_stiffnesses[index], + st_dampings[index], + st_limit_stiffnesses[index], + st_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + """ + Deprecated methods. + """ + + def write_joint_friction_to_sim( + self, + joint_friction: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint friction coefficients into the simulation. + + .. deprecated:: 2.1.0 + Please use :meth:`write_joint_friction_coefficient_to_sim` instead. + """ + logger.warning( + "The function 'write_joint_friction_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_friction_coefficient_to_sim' instead." + ) + self.write_joint_friction_coefficient_to_sim(joint_friction, joint_ids=joint_ids, env_ids=env_ids) + + def write_joint_limits_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ): + """Write joint limits into the simulation. + + .. 
deprecated:: 2.1.0 + Please use :meth:`write_joint_position_limit_to_sim` instead. + """ + logger.warning( + "The function 'write_joint_limits_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_position_limit_to_sim' instead." + ) + self.write_joint_position_limit_to_sim( + limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=warn_limit_violation + ) + + def set_fixed_tendon_limit( + self, + limit: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon position limits into internal buffers. + + .. deprecated:: 2.1.0 + Please use :meth:`set_fixed_tendon_position_limit` instead. + """ + logger.warning( + "The function 'set_fixed_tendon_limit' will be deprecated in a future release. Please" + " use 'set_fixed_tendon_position_limit' instead." + ) + self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py new file mode 100644 index 00000000000..110a55871b9 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -0,0 +1,1479 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import weakref + +import torch + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager + +from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData +import isaaclab.utils.math as math_utils +from isaaclab.utils.buffers import TimestampedBuffer + +# import logger +logger = logging.getLogger(__name__) + + +class ArticulationData(BaseArticulationData): + """Data container for an articulation. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + """ + + __backend_name__: str = "physx" + """The name of the backend for the articulation data.""" + + def __init__(self, root_view: physx.ArticulationView, device: str): + """Initializes the articulation data. + + Args: + root_view: The root articulation view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root articulation view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. 
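The comment above is the reason the assignment that follows goes through weakref.proxy: the data container can reach the view without keeping it alive, so asset and data never form a strong reference cycle. A reduced sketch of the pattern with hypothetical class names:

import weakref

class View:
    count = 4

class Data:
    def __init__(self, view):
        # hold only a weak proxy; attribute access is forwarded to the real view
        self._view = weakref.proxy(view)

view = View()
data = Data(view)
print(data._view.count)  # 4
del view  # with the real view gone, any further access through the proxy raises ReferenceError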
+ self._root_view: physx.ArticulationView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_view.count, 1) + self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + self._create_buffers() + + @property + def is_primed(self) -> bool: + """Whether the articulation data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the articulation data is fully instantiated and ready to use. + + .. note:: Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the articulation data is already primed. + """ + if self._is_primed: + raise ValueError("The articulation data is already primed.") + self._is_primed = True + + + def update(self, dt: float) -> None: + """Updates the data for the articulation. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + # Trigger an update of the joint acceleration buffer at a higher frequency + # since we do finite differencing. + self.joint_acc + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + """ + Defaults - Initial state. + """ + + @property + def default_root_pose(self) -> torch.Tensor: + """Default root pose ``[pos, quat]`` in the local environment frame. Shape is (num_instances, 7). + + The position and quaternion are of the articulation root's actor frame. + """ + return self._default_root_pose + + @default_root_pose.setter + def default_root_pose(self, value: torch.Tensor) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_pose = value + + @property + def default_root_vel(self) -> torch.Tensor: + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 6). + + The linear and angular velocities are of the articulation root's center of mass frame. + """ + return self._default_root_vel + + @default_root_vel.setter + def default_root_vel(self, value: torch.Tensor) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the articulation data is already primed. 
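The setters above all follow the same write-once discipline: default values may be assigned freely until the data is primed, after which assignment raises. A toy, self-contained version of that guard (not the actual class):

class PrimedData:
    def __init__(self):
        self._is_primed = False
        self._default = None

    @property
    def default(self):
        return self._default

    @default.setter
    def default(self, value):
        if self._is_primed:
            raise ValueError("already primed; defaults are frozen")
        self._default = value

data = PrimedData()
data.default = 1.0      # allowed before priming
data._is_primed = True
try:
    data.default = 2.0  # rejected once primed
except ValueError as exc:
    print(exc)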
+ """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_vel = value + + @property + def default_root_state(self) -> torch.Tensor: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 13). + + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular + velocities are of its center of mass frame. + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + logger.warning("Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + return torch.cat([self._default_root_pose, self._default_root_vel], dim=1) + + @default_root_state.setter + def default_root_state(self, value: torch.Tensor) -> None: + """Set the default root state. + + Args: + value: The default root state. Shape is (num_instances, 13). + + Raises: + ValueError: If the articulation data is already primed. + """ + logger.warning("Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_root_pose = value[:, :7] + self._default_root_vel = value[:, 7:] + + @property + def default_joint_pos(self) -> torch.Tensor: + """Default joint positions of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + return self._default_joint_pos + + @default_joint_pos.setter + def default_joint_pos(self, value: torch.Tensor) -> None: + """Set the default joint positions. + + Args: + value: The default joint positions. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_pos = value + + @property + def default_joint_vel(self) -> torch.Tensor: + """Default joint velocities of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + return self._default_joint_vel + + @default_joint_vel.setter + def default_joint_vel(self, value: torch.Tensor) -> None: + """Set the default joint velocities. + + Args: + value: The default joint velocities. Shape is (num_instances, num_joints). + + Raises: + ValueError: If the articulation data is already primed. + """ + if self.is_primed: + raise ValueError("The articulation data is already primed.") + self._default_joint_vel = value + + """ + Joint commands -- Set into simulation. + """ + + @property + def joint_pos_target(self) -> torch.Tensor: + """Joint position targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_pos_target + + @property + def joint_vel_target(self) -> torch.Tensor: + """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints). 
+ + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_vel_target + + @property + def joint_effort_target(self) -> torch.Tensor: + """Joint effort targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + return self._joint_effort_target + + """ + Joint commands -- Explicit actuators. + """ + + @property + def computed_torque(self) -> torch.Tensor: + """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints). + + This quantity is the raw torque output from the actuator mode, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + return self._computed_torque + + @property + def applied_torque(self) -> torch.Tensor: + """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. + """ + return self._applied_torque + + """ + Joint properties + """ + + @property + def joint_stiffness(self) -> torch.Tensor: + """Joint stiffness provided to the simulation. Shape is (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + return self._joint_stiffness + + @property + def joint_damping(self) -> torch.Tensor: + """Joint damping provided to the simulation. Shape is (num_instances, num_joints) + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + return self._joint_damping + + @property + def joint_armature(self) -> torch.Tensor: + """Joint armature provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_armature + + @property + def joint_friction_coeff(self) -> torch.Tensor: + """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_friction_coeff + + @property + def joint_dynamic_friction_coeff(self) -> torch.Tensor: + """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_dynamic_friction_coeff + + @property + def joint_viscous_friction_coeff(self) -> torch.Tensor: + """Joint viscous friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_viscous_friction_coeff + + @property + def joint_pos_limits(self) -> torch.Tensor: + """Joint position limits provided to the simulation. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + return self._joint_pos_limits + + @property + def joint_vel_limits(self) -> torch.Tensor: + """Joint maximum velocity provided to the simulation. Shape is (num_instances, num_joints).""" + return self._joint_vel_limits + + @property + def joint_effort_limits(self) -> torch.Tensor: + """Joint maximum effort provided to the simulation. 
Shape is (num_instances, num_joints).""" + return self._joint_effort_limits + + """ + Joint properties - Custom. + """ + + @property + def soft_joint_pos_limits(self) -> torch.Tensor: + r"""Soft joint positions limits for all joints. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + return self._soft_joint_pos_limits + + @property + def soft_joint_vel_limits(self) -> torch.Tensor: + """Soft joint velocity limits for all joints. Shape is (num_instances, num_joints). + + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. For instance, in a variable gear ratio actuator model. + """ + return self._soft_joint_vel_limits + + @property + def gear_ratio(self) -> torch.Tensor: + """Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints).""" + return self._gear_ratio + + """ + Fixed tendon properties. + """ + + @property + def fixed_tendon_stiffness(self) -> torch.Tensor: + """Fixed tendon stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + return self._fixed_tendon_stiffness + + @property + def fixed_tendon_damping(self) -> torch.Tensor: + """Fixed tendon damping provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + return self._fixed_tendon_damping + + @property + def fixed_tendon_limit_stiffness(self) -> torch.Tensor: + """Fixed tendon limit stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + return self._fixed_tendon_limit_stiffness + + @property + def fixed_tendon_rest_length(self) -> torch.Tensor: + """Fixed tendon rest length provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + return self._fixed_tendon_rest_length + + @property + def fixed_tendon_offset(self) -> torch.Tensor: + """Fixed tendon offset provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + return self._fixed_tendon_offset + + @property + def fixed_tendon_pos_limits(self) -> torch.Tensor: + """Fixed tendon position limits provided to the simulation. Shape is (num_instances, num_fixed_tendons, 2).""" + return self._fixed_tendon_pos_limits + + """ + Spatial tendon properties. + """ + + @property + def spatial_tendon_stiffness(self) -> torch.Tensor: + """Spatial tendon stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + return self._spatial_tendon_stiffness + + @property + def spatial_tendon_damping(self) -> torch.Tensor: + """Spatial tendon damping provided to the simulation. 
Shape is (num_instances, num_spatial_tendons).""" + return self._spatial_tendon_damping + + @property + def spatial_tendon_limit_stiffness(self) -> torch.Tensor: + """Spatial tendon limit stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + return self._spatial_tendon_limit_stiffness + + @property + def spatial_tendon_offset(self) -> torch.Tensor: + """Spatial tendon offset provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + return self._spatial_tendon_offset + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_view.get_root_transforms().clone() + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._root_link_pose_w.data = pose + self._root_link_pose_w.timestamp = self._sim_timestamp + + return self._root_link_pose_w.data + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity + vel = self.root_com_vel_w.clone() + # adjust linear velocity to link from center of mass + vel[:, :3] += torch.linalg.cross( + vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + ) + # set the buffer data and timestamp + self._root_link_vel_w.data = vel + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w.data + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] + ) + # set the buffer data and timestamp + self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w.data + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_view.get_root_velocities() + self._root_com_vel_w.timestamp = self._sim_timestamp + + return self._root_com_vel_w.data + + @property + def root_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). 
+ + The position and quaternion are of the articulation root's actor frame relative to the world. Meanwhile, + the linear and angular velocities are of the articulation root's center of mass frame. + """ + if self._root_state_w.timestamp < self._sim_timestamp: + self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w.data + + @property + def root_link_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root's actor frame relative to the + world. + """ + if self._root_link_state_w.timestamp < self._sim_timestamp: + self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w.data + + @property + def root_com_state_w(self) -> torch.Tensor: + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame + relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the + orientation of the principle inertia. + """ + if self._root_com_state_w.timestamp < self._sim_timestamp: + self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w.data + + """ + Body state properties. + """ + + @property + def body_mass(self) -> torch.Tensor: + """Body mass ``wp.float32`` in the world frame. Shape is (num_instances, num_bodies).""" + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Body inertia ``wp.mat33`` in the world frame. Shape is (num_instances, num_bodies, 3, 3).""" + return self._body_inertia + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_link_pose_w.timestamp < self._sim_timestamp: + # perform forward kinematics (shouldn't cause overhead if it happened already) + self._physics_sim_view.update_articulations_kinematic() + # read data from simulation + poses = self._root_view.get_link_transforms().clone() + poses[..., 3:7] = math_utils.convert_quat(poses[..., 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_link_pose_w.data = poses + self._body_link_pose_w.timestamp = self._sim_timestamp + + return self._body_link_pose_w.data + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. 
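The property below shifts the reported center-of-mass velocity to the link-frame origin using v_link = v_com + omega x (p_link - p_com). A standalone numeric check of that shift with hypothetical values:

import torch

v_com = torch.tensor([1.0, 0.0, 0.0])         # CoM linear velocity (world frame)
omega = torch.tensor([0.0, 0.0, 2.0])         # angular velocity (world frame)
com_offset_w = torch.tensor([0.1, 0.0, 0.0])  # CoM position relative to the link frame, in world axes

v_link = v_com + torch.linalg.cross(omega, -com_offset_w)
print(v_link)  # tensor([ 1.0000, -0.2000,  0.0000])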
+ """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + # read data from simulation + velocities = self.body_com_vel_w.clone() + # adjust linear velocity to link from center of mass + velocities[..., :3] += torch.linalg.cross( + velocities[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 + ) + # set the buffer data and timestamp + self._body_link_vel_w.data = velocities + self._body_link_vel_w.timestamp = self._sim_timestamp + + return self._body_link_vel_w.data + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b + ) + # set the buffer data and timestamp + self._body_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._body_com_pose_w.timestamp = self._sim_timestamp + + return self._body_com_pose_w.data + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + """ + if self._body_com_vel_w.timestamp < self._sim_timestamp: + self._body_com_vel_w.data = self._root_view.get_link_velocities() + self._body_com_vel_w.timestamp = self._sim_timestamp + + return self._body_com_vel_w.data + + @property + def body_state_w(self): + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular + velocities are of the articulation links's center of mass frame. + """ + if self._body_state_w.timestamp < self._sim_timestamp: + self._body_state_w.data = torch.cat((self.body_link_pose_w, self.body_com_vel_w), dim=-1) + self._body_state_w.timestamp = self._sim_timestamp + + return self._body_state_w.data + + @property + def body_link_state_w(self): + """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + """ + if self._body_link_state_w.timestamp < self._sim_timestamp: + self._body_link_state_w.data = torch.cat((self.body_link_pose_w, self.body_link_vel_w), dim=-1) + self._body_link_state_w.timestamp = self._sim_timestamp + + return self._body_link_state_w.data + + @property + def body_com_state_w(self): + """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. 
+ """ + if self._body_com_state_w.timestamp < self._sim_timestamp: + self._body_com_state_w.data = torch.cat((self.body_com_pose_w, self.body_com_vel_w), dim=-1) + self._body_com_state_w.timestamp = self._sim_timestamp + + return self._body_com_state_w.data + + @property + def body_com_acc_w(self): + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. + Shape is (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._body_com_acc_w.data = self._root_view.get_link_accelerations() + self._body_com_acc_w.timestamp = self._sim_timestamp + + return self._body_com_acc_w.data + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_view.get_coms().to(self.device) + pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_com_pose_b.data = pose + self._body_com_pose_b.timestamp = self._sim_timestamp + + return self._body_com_pose_b.data + + @property + def body_incoming_joint_wrench_b(self) -> torch.Tensor: + """Joint reaction wrench applied from body parent to child body in parent body frame. + + Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the + world of an articulation. + + For more information on joint wrenches, please check the`PhysX documentation`_ and the underlying + `PhysX Tensor API`_. + + .. _`PhysX documentation`: https://nvidia-omniverse.github.io/PhysX/physx/5.5.1/docs/Articulations.html#link-incoming-joint-force + .. _`PhysX Tensor API`: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.get_link_incoming_joint_force + """ + + if self._body_incoming_joint_wrench_b.timestamp < self._sim_timestamp: + self._body_incoming_joint_wrench_b.data = self._root_view.get_link_incoming_joint_force() + self._body_incoming_joint_wrench_b.timestamp = self._sim_timestamp + return self._body_incoming_joint_wrench_b.data + + """ + Joint state properties. + """ + + @property + def joint_pos(self) -> torch.Tensor: + """Joint positions of all joints. Shape is (num_instances, num_joints).""" + if self._joint_pos.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._joint_pos.data = self._root_view.get_dof_positions() + self._joint_pos.timestamp = self._sim_timestamp + return self._joint_pos.data + + @property + def joint_vel(self) -> torch.Tensor: + """Joint velocities of all joints. Shape is (num_instances, num_joints).""" + if self._joint_vel.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._joint_vel.data = self._root_view.get_dof_velocities() + self._joint_vel.timestamp = self._sim_timestamp + return self._joint_vel.data + + @property + def joint_acc(self) -> torch.Tensor: + """Joint acceleration of all joints. 
Shape is (num_instances, num_joints)."""
+        if self._joint_acc.timestamp < self._sim_timestamp:
+            # note: we use finite differencing to compute acceleration
+            time_elapsed = self._sim_timestamp - self._joint_acc.timestamp
+            self._joint_acc.data = (self.joint_vel - self._previous_joint_vel) / time_elapsed
+            self._joint_acc.timestamp = self._sim_timestamp
+            # update the previous joint velocity
+            self._previous_joint_vel[:] = self.joint_vel
+        return self._joint_acc.data
+
+    """
+    Derived Properties.
+    """
+
+    @property
+    def projected_gravity_b(self):
+        """Projection of the gravity direction on base frame. Shape is (num_instances, 3)."""
+        return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W)
+
+    @property
+    def heading_w(self):
+        """Yaw heading of the base frame (in radians). Shape is (num_instances,).
+
+        .. note::
+            This quantity is computed by assuming that the forward-direction of the base
+            frame is along x-direction, i.e. :math:`(1, 0, 0)`.
+        """
+        forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B)
+        return torch.atan2(forward_w[:, 1], forward_w[:, 0])
+
+    @property
+    def root_link_lin_vel_b(self) -> torch.Tensor:
+        """Root link linear velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the linear velocity of the articulation root's actor frame relative to the world,
+        expressed in its actor (base) frame.
+        """
+        return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w)
+
+    @property
+    def root_link_ang_vel_b(self) -> torch.Tensor:
+        """Root link angular velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the angular velocity of the articulation root's actor frame relative to the world,
+        expressed in its actor (base) frame.
+        """
+        return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w)
+
+    @property
+    def root_com_lin_vel_b(self) -> torch.Tensor:
+        """Root center of mass linear velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the linear velocity of the articulation root's center of mass frame relative to the world,
+        expressed in the root's actor (base) frame.
+        """
+        return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w)
+
+    @property
+    def root_com_ang_vel_b(self) -> torch.Tensor:
+        """Root center of mass angular velocity in base frame. Shape is (num_instances, 3).
+
+        This quantity is the angular velocity of the articulation root's center of mass frame relative to the world,
+        expressed in the root's actor (base) frame.
+        """
+        return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w)
+
+    """
+    Sliced properties.
+    """
+
+    @property
+    def root_link_pos_w(self) -> torch.Tensor:
+        """Root link position in simulation world frame. Shape is (num_instances, 3).
+
+        This quantity is the position of the actor frame of the root rigid body relative to the world.
+        """
+        return self.root_link_pose_w[:, :3]
+
+    @property
+    def root_link_quat_w(self) -> torch.Tensor:
+        """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4).
+
+        This quantity is the orientation of the actor frame of the root rigid body.
+        """
+        return self.root_link_pose_w[:, 3:7]
+
+    @property
+    def root_link_lin_vel_w(self) -> torch.Tensor:
+        """Root link linear velocity in simulation world frame. Shape is (num_instances, 3).
+
+        This quantity is the linear velocity of the root rigid body's actor frame relative to the world.
+        """
+        return self.root_link_vel_w[:, :3]
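A quick numeric check of the world-to-base rotation used by the derived base-frame properties above, using a plain quaternion rotation helper instead of isaaclab.utils.math (the helper name and values here are for illustration only):

import torch

def rotate_inverse(q, v):
    # rotate v by the inverse of the unit quaternion q = (w, x, y, z)
    w, xyz = q[..., :1], q[..., 1:]
    t = 2.0 * torch.linalg.cross(xyz, v)
    return v - w * t + torch.linalg.cross(xyz, t)

base_quat_w = torch.tensor([0.7071068, 0.0, 0.0, 0.7071068])  # 90 deg yaw about z
gravity_dir_w = torch.tensor([0.0, 0.0, -1.0])
print(rotate_inverse(base_quat_w, gravity_dir_w))  # [0, 0, -1]: a pure yaw leaves projected gravity unchanged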
+ """ + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_vel_w[:, 3:6] + + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, 3:6] + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., 3:6] + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame. + """ + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. + Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame. 
+ """ + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear velocity of the articulation bodies' center of mass frame. + """ + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the angular velocity of the articulation bodies' center of mass frame. + """ + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the linear acceleration of the articulation bodies' center of mass frame. + """ + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the angular acceleration of the articulation bodies' center of mass frame. + """ + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, num_bodies, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + return self.body_com_pose_b[..., 3:7] + + def _create_buffers(self) -> None: + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer() + self._root_link_vel_w = TimestampedBuffer() + self._body_link_pose_w = TimestampedBuffer() + self._body_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. 
world frame + self._root_com_pose_w = TimestampedBuffer() + self._root_com_vel_w = TimestampedBuffer() + self._body_com_pose_w = TimestampedBuffer() + self._body_com_vel_w = TimestampedBuffer() + self._body_com_acc_w = TimestampedBuffer() + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer() + self._root_link_state_w = TimestampedBuffer() + self._root_com_state_w = TimestampedBuffer() + self._body_state_w = TimestampedBuffer() + self._body_link_state_w = TimestampedBuffer() + self._body_com_state_w = TimestampedBuffer() + # -- joint state + self._joint_pos = TimestampedBuffer() + self._joint_vel = TimestampedBuffer() + self._joint_acc = TimestampedBuffer() + self._body_incoming_joint_wrench_b = TimestampedBuffer() + + num_dofs = self._root_view.shared_metatype.dof_count + num_fixed_tendons = self._root_view.max_fixed_tendons + num_spatial_tendons = self._root_view.max_spatial_tendons + + # Default root pose and velocity + self._default_root_pose = torch.zeros((self._root_view.count, 7), device=self.device) + self._default_root_vel = torch.zeros((self._root_view.count, 6), device=self.device) + self._default_joint_pos = torch.zeros((self._root_view.count, num_dofs), device=self.device) + self._default_joint_vel = torch.zeros((self._root_view.count, num_dofs), device=self.device) + + # Initialize history for finite differencing + self._previous_joint_vel = self._root_view.get_dof_velocities().clone() + + # Pre-allocated buffers + # -- Joint commands (set into simulation) + self._joint_pos_target = torch.zeros((self._root_view.count, num_dofs), device=self.device) + self._joint_vel_target = torch.zeros((self._root_view.count, num_dofs), device=self.device) + self._joint_effort_target = torch.zeros((self._root_view.count, num_dofs), device=self.device) + # -- Joint commands (explicit actuator model) + self._computed_torque = torch.zeros((self._root_view.count, num_dofs), device=self.device) + self._applied_torque = torch.zeros((self._root_view.count, num_dofs), device=self.device) + # -- Joint properties + self._joint_stiffness = self._root_view.get_dof_stiffnesses().to(self.device).clone() + self._joint_damping = self._root_view.get_dof_dampings().to(self.device).clone() + self._joint_armature = self._root_view.get_dof_armatures().to(self.device).clone() + friction_props = self._root_view.get_dof_friction_properties() + self._joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() + self._joint_dynamic_friction_coeff = friction_props[:, :, 1].to(self.device).clone() + self._joint_viscous_friction_coeff = friction_props[:, :, 2].to(self.device).clone() + self._joint_pos_limits = self._root_view.get_dof_limits().to(self.device).clone() + self._joint_vel_limits = self._root_view.get_dof_max_velocities().to(self.device).clone() + self._joint_effort_limits = self._root_view.get_dof_max_forces().to(self.device).clone() + # -- Joint properties (custom) + self._soft_joint_pos_limits = torch.zeros((self._root_view.count, num_dofs, 2), device=self.device) + self._soft_joint_vel_limits = torch.zeros((self._root_view.count, num_dofs), device=self.device) + self._gear_ratio = torch.ones((self._root_view.count, num_dofs), device=self.device) + # -- Fixed tendon properties + if num_fixed_tendons > 0: + self._fixed_tendon_stiffness = self._root_view.get_fixed_tendon_stiffnesses().to(self.device).clone() + self._fixed_tendon_damping = self._root_view.get_fixed_tendon_dampings().to(self.device).clone() + self._fixed_tendon_limit_stiffness = 
self._root_view.get_fixed_tendon_limit_stiffnesses().to(self.device).clone() + self._fixed_tendon_rest_length = self._root_view.get_fixed_tendon_rest_lengths().to(self.device).clone() + self._fixed_tendon_offset = self._root_view.get_fixed_tendon_offsets().to(self.device).clone() + self._fixed_tendon_pos_limits = self._root_view.get_fixed_tendon_limits().to(self.device).clone() + else: + self._fixed_tendon_stiffness = None + self._fixed_tendon_damping = None + self._fixed_tendon_limit_stiffness = None + self._fixed_tendon_rest_length = None + self._fixed_tendon_offset = None + self._fixed_tendon_pos_limits = None + # -- Spatial tendon properties + if num_spatial_tendons > 0: + self._spatial_tendon_stiffness = self._root_view.get_spatial_tendon_stiffnesses().to(self.device).clone() + self._spatial_tendon_damping = self._root_view.get_spatial_tendon_dampings().to(self.device).clone() + self._spatial_tendon_limit_stiffness = self._root_view.get_spatial_tendon_limit_stiffnesses().to(self.device).clone() + self._spatial_tendon_offset = self._root_view.get_spatial_tendon_offsets().to(self.device).clone() + else: + self._spatial_tendon_stiffness = None + self._spatial_tendon_damping = None + self._spatial_tendon_limit_stiffness = None + self._spatial_tendon_offset = None + # -- Body properties + self._body_mass = self._root_view.get_masses().to(self.device).clone() + self._body_inertia = self._root_view.get_inertias().to(self.device).clone() + + """ + Backward compatibility. (Deprecated properties) + """ + + @property + def root_pose_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" + logger.warning( + "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead." + ) + return self.root_link_pose_w + + @property + def root_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" + logger.warning( + "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead." + ) + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" + logger.warning( + "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead." + ) + return self.root_link_quat_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_vel_w` instead.""" + logger.warning( + "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead." + ) + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" + logger.warning( + "The `root_lin_vel_w` property will be deprecated in a future release. Please use" + " `root_com_lin_vel_w` instead." + ) + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" + logger.warning( + "The `root_ang_vel_w` property will be deprecated in a future release. Please use" + " `root_com_ang_vel_w` instead." + ) + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_lin_vel_b` instead.""" + logger.warning( + "The `root_lin_vel_b` property will be deprecated in a future release. 
Please use" + " `root_com_lin_vel_b` instead." + ) + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" + logger.warning( + "The `root_ang_vel_b` property will be deprecated in a future release. Please use" + " `root_com_ang_vel_b` instead." + ) + return self.root_com_ang_vel_b + + @property + def body_pose_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" + logger.warning( + "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead." + ) + return self.body_link_pose_w + + @property + def body_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" + logger.warning( + "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." + ) + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" + logger.warning( + "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead." + ) + return self.body_link_quat_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" + logger.warning( + "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." + ) + return self.body_com_vel_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" + logger.warning( + "The `body_lin_vel_w` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_w` instead." + ) + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" + logger.warning( + "The `body_ang_vel_w` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_w` instead." + ) + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" + logger.warning( + "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." + ) + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" + logger.warning( + "The `body_lin_acc_w` property will be deprecated in a future release. Please use" + " `body_com_lin_acc_w` instead." + ) + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" + logger.warning( + "The `body_ang_acc_w` property will be deprecated in a future release. Please use" + " `body_com_ang_acc_w` instead." + ) + return self.body_com_ang_acc_w + + @property + def com_pos_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" + logger.warning( + "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." + ) + return self.body_com_pos_b + + @property + def com_quat_b(self) -> torch.Tensor: + """Deprecated property. 
Please use :attr:`body_com_quat_b` instead.""" + logger.warning( + "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." + ) + return self.body_com_quat_b + + @property + def joint_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" + logger.warning( + "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead." + ) + return self.joint_pos_limits + + @property + def default_joint_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" + logger.warning( + "The `default_joint_limits` property will be deprecated in a future release. Please use" + " `default_joint_pos_limits` instead." + ) + return self.default_joint_pos_limits + + @property + def joint_velocity_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" + logger.warning( + "The `joint_velocity_limits` property will be deprecated in a future release. Please use" + " `joint_vel_limits` instead." + ) + return self.joint_vel_limits + + @property + def joint_friction(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_friction_coeff` instead.""" + logger.warning( + "The `joint_friction` property will be deprecated in a future release. Please use" + " `joint_friction_coeff` instead." + ) + return self.joint_friction_coeff + + @property + def default_joint_friction(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" + logger.warning( + "The `default_joint_friction` property will be deprecated in a future release. Please use" + " `default_joint_friction_coeff` instead." + ) + return self.default_joint_friction_coeff + + @property + def fixed_tendon_limit(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" + logger.warning( + "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" + " `fixed_tendon_pos_limits` instead." + ) + return self.fixed_tendon_pos_limits + + @property + def default_fixed_tendon_limit(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" + logger.warning( + "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" + " `default_fixed_tendon_pos_limits` instead." + ) + return self.default_fixed_tendon_pos_limits + + """ + Removed - Default values are no longer stored. + """ + + @property + def default_mass(self) -> torch.Tensor: + """Removed: Default mass is no longer stored.""" + raise RuntimeError( + "The property 'default_mass' has been removed. Default values are no longer stored. " + "Please use 'body_mass' to get the current mass values from the simulation." + ) + + @property + def default_inertia(self) -> torch.Tensor: + """Removed: Default inertia is no longer stored.""" + raise RuntimeError( + "The property 'default_inertia' has been removed. Default values are no longer stored. " + "Please use 'body_inertia' to get the current inertia values from the simulation." + ) + + @property + def default_joint_stiffness(self) -> torch.Tensor: + """Removed: Default joint stiffness is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_stiffness' has been removed. Default values are no longer stored. " + "Please use 'joint_stiffness' to get the current stiffness values from the simulation." 
+ ) + + @property + def default_joint_damping(self) -> torch.Tensor: + """Removed: Default joint damping is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_damping' has been removed. Default values are no longer stored. " + "Please use 'joint_damping' to get the current damping values from the simulation." + ) + + @property + def default_joint_armature(self) -> torch.Tensor: + """Removed: Default joint armature is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_armature' has been removed. Default values are no longer stored. " + "Please use 'joint_armature' to get the current armature values from the simulation." + ) + + @property + def default_joint_friction_coeff(self) -> torch.Tensor: + """Removed: Default joint friction coefficient is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_friction_coeff' has been removed. Default values are no longer stored. " + "Please use 'joint_friction_coeff' to get the current friction coefficient values from the simulation." + ) + + @property + def default_joint_dynamic_friction_coeff(self) -> torch.Tensor: + """Removed: Default joint dynamic friction coefficient is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_dynamic_friction_coeff' has been removed. Default values are no longer stored. " + "Please use 'joint_dynamic_friction_coeff' to get the current dynamic friction coefficient values from the simulation." + ) + + @property + def default_joint_viscous_friction_coeff(self) -> torch.Tensor: + """Removed: Default joint viscous friction coefficient is no longer stored.""" + raise RuntimeError( + "The property 'default_joint_viscous_friction_coeff' has been removed. Default values are no longer stored. " + "Please use 'joint_viscous_friction_coeff' to get the current viscous friction coefficient values from the simulation." + ) + + @property + def default_joint_pos_limits(self) -> torch.Tensor: + """Removed: Default joint position limits are no longer stored.""" + raise RuntimeError( + "The property 'default_joint_pos_limits' has been removed. Default values are no longer stored. " + "Please use 'joint_pos_limits' to get the current position limits from the simulation." + ) + + @property + def default_fixed_tendon_stiffness(self) -> torch.Tensor: + """Removed: Default fixed tendon stiffness is no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_stiffness' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_stiffness' to get the current stiffness values from the simulation." + ) + + @property + def default_fixed_tendon_damping(self) -> torch.Tensor: + """Removed: Default fixed tendon damping is no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_damping' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_damping' to get the current damping values from the simulation." + ) + + @property + def default_fixed_tendon_limit_stiffness(self) -> torch.Tensor: + """Removed: Default fixed tendon limit stiffness is no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_limit_stiffness' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_limit_stiffness' to get the current limit stiffness values from the simulation." 
+ ) + + @property + def default_fixed_tendon_rest_length(self) -> torch.Tensor: + """Removed: Default fixed tendon rest length is no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_rest_length' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_rest_length' to get the current rest length values from the simulation." + ) + + @property + def default_fixed_tendon_offset(self) -> torch.Tensor: + """Removed: Default fixed tendon offset is no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_offset' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_offset' to get the current offset values from the simulation." + ) + + @property + def default_fixed_tendon_pos_limits(self) -> torch.Tensor: + """Removed: Default fixed tendon position limits are no longer stored.""" + raise RuntimeError( + "The property 'default_fixed_tendon_pos_limits' has been removed. Default values are no longer stored. " + "Please use 'fixed_tendon_pos_limits' to get the current position limits from the simulation." + ) + + @property + def default_spatial_tendon_stiffness(self) -> torch.Tensor: + """Removed: Default spatial tendon stiffness is no longer stored.""" + raise RuntimeError( + "The property 'default_spatial_tendon_stiffness' has been removed. Default values are no longer stored. " + "Please use 'spatial_tendon_stiffness' to get the current stiffness values from the simulation." + ) + + @property + def default_spatial_tendon_damping(self) -> torch.Tensor: + """Removed: Default spatial tendon damping is no longer stored.""" + raise RuntimeError( + "The property 'default_spatial_tendon_damping' has been removed. Default values are no longer stored. " + "Please use 'spatial_tendon_damping' to get the current damping values from the simulation." + ) + + @property + def default_spatial_tendon_limit_stiffness(self) -> torch.Tensor: + """Removed: Default spatial tendon limit stiffness is no longer stored.""" + raise RuntimeError( + "The property 'default_spatial_tendon_limit_stiffness' has been removed. Default values are no longer stored. " + "Please use 'spatial_tendon_limit_stiffness' to get the current limit stiffness values from the simulation." + ) + + @property + def default_spatial_tendon_offset(self) -> torch.Tensor: + """Removed: Default spatial tendon offset is no longer stored.""" + raise RuntimeError( + "The property 'default_spatial_tendon_offset' has been removed. Default values are no longer stored. " + "Please use 'spatial_tendon_offset' to get the current offset values from the simulation." + ) \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py new file mode 100644 index 00000000000..47902edc0a5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py @@ -0,0 +1,1165 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import logging +import weakref + +import torch + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager + +import isaaclab.utils.math as math_utils +from isaaclab.utils.buffers import TimestampedBuffer + +# import logger +logger = logging.getLogger(__name__) + + +class ArticulationData: + """Data container for an articulation. + + This class contains the data for an articulation in the simulation. The data includes the state of + the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is + stored in the simulation world frame unless otherwise specified. + + An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames + of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame + can be interpreted as the link frame. + """ + + def __init__(self, root_physx_view: physx.ArticulationView, device: str): + """Initializes the articulation data. + + Args: + root_physx_view: The root articulation view. + device: The device used for processing. + """ + # Set the parameters + self.device = device + # Set the root articulation view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_physx_view: physx.ArticulationView = weakref.proxy(root_physx_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) + self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) + + # Initialize history for finite differencing + self._previous_joint_vel = self._root_physx_view.get_dof_velocities().clone() + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer() + self._root_link_vel_w = TimestampedBuffer() + self._body_link_pose_w = TimestampedBuffer() + self._body_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. 
world frame + self._root_com_pose_w = TimestampedBuffer() + self._root_com_vel_w = TimestampedBuffer() + self._body_com_pose_w = TimestampedBuffer() + self._body_com_vel_w = TimestampedBuffer() + self._body_com_acc_w = TimestampedBuffer() + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer() + self._root_link_state_w = TimestampedBuffer() + self._root_com_state_w = TimestampedBuffer() + self._body_state_w = TimestampedBuffer() + self._body_link_state_w = TimestampedBuffer() + self._body_com_state_w = TimestampedBuffer() + # -- joint state + self._joint_pos = TimestampedBuffer() + self._joint_vel = TimestampedBuffer() + self._joint_acc = TimestampedBuffer() + self._body_incoming_joint_wrench_b = TimestampedBuffer() + + def update(self, dt: float): + # update the simulation timestamp + self._sim_timestamp += dt + # Trigger an update of the joint acceleration buffer at a higher frequency + # since we do finite differencing. + self.joint_acc + + ## + # Names. + ## + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + joint_names: list[str] = None + """Joint names in the order parsed by the simulation view.""" + + fixed_tendon_names: list[str] = None + """Fixed tendon names in the order parsed by the simulation view.""" + + spatial_tendon_names: list[str] = None + """Spatial tendon names in the order parsed by the simulation view.""" + + ## + # Defaults - Initial state. + ## + + default_root_state: torch.Tensor = None + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. + Shape is (num_instances, 13). + + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular + velocities are of its center of mass frame. + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + + default_joint_pos: torch.Tensor = None + """Default joint positions of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + + default_joint_vel: torch.Tensor = None + """Default joint velocities of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. + """ + + ## + # Defaults - Physical properties. + ## + + default_mass: torch.Tensor = None + """Default mass for all the bodies in the articulation. Shape is (num_instances, num_bodies). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_inertia: torch.Tensor = None + """Default inertia for all the bodies in the articulation. Shape is (num_instances, num_bodies, 9). + + The inertia tensor should be given with respect to the center of mass, expressed in the articulation links' + actor frame. The values are stored in the order + :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. However, due to the + symmetry of inertia tensors, row- and column-major orders are equivalent. + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_joint_stiffness: torch.Tensor = None + """Default joint stiffness of all joints. Shape is (num_instances, num_joints). + + This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.stiffness` + parameter. 
If the parameter's value is None, the value parsed from the USD schema, at the time of initialization,
+ is used.
+
+ .. attention::
+ The default stiffness is the value configured by the user or the value parsed from the USD schema.
+ It should not be confused with :attr:`joint_stiffness`, which is the value set into the simulation.
+ """
+
+ default_joint_damping: torch.Tensor = None
+ """Default joint damping of all joints. Shape is (num_instances, num_joints).
+
+ This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.damping`
+ parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization,
+ is used.
+
+ .. attention::
+ The default damping is the value configured by the user or the value parsed from the USD schema.
+ It should not be confused with :attr:`joint_damping`, which is the value set into the simulation.
+ """
+
+ default_joint_armature: torch.Tensor = None
+ """Default joint armature of all joints. Shape is (num_instances, num_joints).
+
+ This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.armature`
+ parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization,
+ is used.
+ """
+
+ default_joint_friction_coeff: torch.Tensor = None
+ """Default joint static friction coefficient of all joints. Shape is (num_instances, num_joints).
+
+ This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.friction`
+ parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization,
+ is used.
+
+ Note:
+ In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later,
+ it is modeled as an effort (torque or force).
+ """
+
+ default_joint_dynamic_friction_coeff: torch.Tensor = None
+ """Default joint dynamic friction coefficient of all joints. Shape is (num_instances, num_joints).
+
+ This quantity is configured through the actuator model's
+ :attr:`isaaclab.actuators.ActuatorBaseCfg.dynamic_friction` parameter. If the parameter's value is None,
+ the value parsed from the USD schema, at the time of initialization, is used.
+
+ Note:
+ In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later,
+ it is modeled as an effort (torque or force).
+ """
+
+ default_joint_viscous_friction_coeff: torch.Tensor = None
+ """Default joint viscous friction coefficient of all joints. Shape is (num_instances, num_joints).
+
+ This quantity is configured through the actuator model's
+ :attr:`isaaclab.actuators.ActuatorBaseCfg.viscous_friction` parameter. If the parameter's value is None,
+ the value parsed from the USD schema, at the time of initialization, is used.
+ """
+
+ default_joint_pos_limits: torch.Tensor = None
+ """Default joint position limits of all joints. Shape is (num_instances, num_joints, 2).
+
+ The limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the
+ time of initialization.
+ """
+
+ default_fixed_tendon_stiffness: torch.Tensor = None
+ """Default tendon stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons).
+
+ This quantity is parsed from the USD schema at the time of initialization.
+ """
+
+ default_fixed_tendon_damping: torch.Tensor = None
+ """Default tendon damping of all fixed tendons. Shape is (num_instances, num_fixed_tendons). 
+ + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_fixed_tendon_limit_stiffness: torch.Tensor = None + """Default tendon limit stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_fixed_tendon_rest_length: torch.Tensor = None + """Default tendon rest length of all fixed tendons. Shape is (num_instances, num_fixed_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_fixed_tendon_offset: torch.Tensor = None + """Default tendon offset of all fixed tendons. Shape is (num_instances, num_fixed_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_fixed_tendon_pos_limits: torch.Tensor = None + """Default tendon position limits of all fixed tendons. Shape is (num_instances, num_fixed_tendons, 2). + + The position limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the time of + initialization. + """ + + default_spatial_tendon_stiffness: torch.Tensor = None + """Default tendon stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_spatial_tendon_damping: torch.Tensor = None + """Default tendon damping of all spatial tendons. Shape is (num_instances, num_spatial_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_spatial_tendon_limit_stiffness: torch.Tensor = None + """Default tendon limit stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + default_spatial_tendon_offset: torch.Tensor = None + """Default tendon offset of all spatial tendons. Shape is (num_instances, num_spatial_tendons). + + This quantity is parsed from the USD schema at the time of initialization. + """ + + ## + # Joint commands -- Set into simulation. + ## + + joint_pos_target: torch.Tensor = None + """Joint position targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + + joint_vel_target: torch.Tensor = None + """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + + joint_effort_target: torch.Tensor = None + """Joint effort targets commanded by the user. Shape is (num_instances, num_joints). + + For an implicit actuator model, the targets are directly set into the simulation. + For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), + which are then set into the simulation. + """ + + ## + # Joint commands -- Explicit actuators. + ## + + computed_torque: torch.Tensor = None + """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints). 
+ + This quantity is the raw torque output from the actuator mode, before any clipping is applied. + It is exposed for users who want to inspect the computations inside the actuator model. + For instance, to penalize the learning agent for a difference between the computed and applied torques. + """ + + applied_torque: torch.Tensor = None + """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints). + + These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the + actuator model. + """ + + ## + # Joint properties. + ## + + joint_stiffness: torch.Tensor = None + """Joint stiffness provided to the simulation. Shape is (num_instances, num_joints). + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + + joint_damping: torch.Tensor = None + """Joint damping provided to the simulation. Shape is (num_instances, num_joints) + + In the case of explicit actuators, the value for the corresponding joints is zero. + """ + + joint_armature: torch.Tensor = None + """Joint armature provided to the simulation. Shape is (num_instances, num_joints).""" + + joint_friction_coeff: torch.Tensor = None + """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints). + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). + """ + + joint_dynamic_friction_coeff: torch.Tensor = None + """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints). + + Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, + it is modeled as an effort (torque or force). + """ + + joint_viscous_friction_coeff: torch.Tensor = None + """Joint viscous friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" + + joint_pos_limits: torch.Tensor = None + """Joint position limits provided to the simulation. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`. + """ + + joint_vel_limits: torch.Tensor = None + """Joint maximum velocity provided to the simulation. Shape is (num_instances, num_joints).""" + + joint_effort_limits: torch.Tensor = None + """Joint maximum effort provided to the simulation. Shape is (num_instances, num_joints).""" + + ## + # Joint properties - Custom. + ## + + soft_joint_pos_limits: torch.Tensor = None + r"""Soft joint positions limits for all joints. Shape is (num_instances, num_joints, 2). + + The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as + a sub-region of the :attr:`joint_pos_limits` based on the + :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. + + Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits + :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: + + .. math:: + + soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 + soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 + + The soft joint position limits help specify a safety region around the joint limits. It isn't used by the + simulation, but is useful for learning agents to prevent the joint positions from violating the limits. + """ + + soft_joint_vel_limits: torch.Tensor = None + """Soft joint velocity limits for all joints. Shape is (num_instances, num_joints). 
+ + These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model + has a variable velocity limit model. For instance, in a variable gear ratio actuator model. + """ + + gear_ratio: torch.Tensor = None + """Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints).""" + + ## + # Fixed tendon properties. + ## + + fixed_tendon_stiffness: torch.Tensor = None + """Fixed tendon stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + + fixed_tendon_damping: torch.Tensor = None + """Fixed tendon damping provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + + fixed_tendon_limit_stiffness: torch.Tensor = None + """Fixed tendon limit stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + + fixed_tendon_rest_length: torch.Tensor = None + """Fixed tendon rest length provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + + fixed_tendon_offset: torch.Tensor = None + """Fixed tendon offset provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" + + fixed_tendon_pos_limits: torch.Tensor = None + """Fixed tendon position limits provided to the simulation. Shape is (num_instances, num_fixed_tendons, 2).""" + + ## + # Spatial tendon properties. + ## + + spatial_tendon_stiffness: torch.Tensor = None + """Spatial tendon stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + + spatial_tendon_damping: torch.Tensor = None + """Spatial tendon damping provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + + spatial_tendon_limit_stiffness: torch.Tensor = None + """Spatial tendon limit stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + + spatial_tendon_offset: torch.Tensor = None + """Spatial tendon offset provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" + + ## + # Root state properties. + ## + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the articulation root's actor frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_physx_view.get_root_transforms().clone() + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._root_link_pose_w.data = pose + self._root_link_pose_w.timestamp = self._sim_timestamp + + return self._root_link_pose_w.data + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's actor frame + relative to the world. 
+ """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity + vel = self.root_com_vel_w.clone() + # adjust linear velocity to link from center of mass + vel[:, :3] += torch.linalg.cross( + vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + ) + # set the buffer data and timestamp + self._root_link_vel_w.data = vel + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w.data + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the articulation root's center of mass frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] + ) + # set the buffer data and timestamp + self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w.data + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the articulation root's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_physx_view.get_root_velocities() + self._root_com_vel_w.timestamp = self._sim_timestamp + + return self._root_com_vel_w.data + + @property + def root_state_w(self): + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position and quaternion are of the articulation root's actor frame relative to the world. Meanwhile, + the linear and angular velocities are of the articulation root's center of mass frame. + """ + if self._root_state_w.timestamp < self._sim_timestamp: + self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w.data + + @property + def root_link_state_w(self): + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root's actor frame relative to the + world. + """ + if self._root_link_state_w.timestamp < self._sim_timestamp: + self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w.data + + @property + def root_com_state_w(self): + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame + relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the + orientation of the principle inertia. 
+ """ + if self._root_com_state_w.timestamp < self._sim_timestamp: + self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w.data + + ## + # Body state properties. + ## + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the articulation links' actor frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_link_pose_w.timestamp < self._sim_timestamp: + # perform forward kinematics (shouldn't cause overhead if it happened already) + self._physics_sim_view.update_articulations_kinematic() + # read data from simulation + poses = self._root_physx_view.get_link_transforms().clone() + poses[..., 3:7] = math_utils.convert_quat(poses[..., 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_link_pose_w.data = poses + self._body_link_pose_w.timestamp = self._sim_timestamp + + return self._body_link_pose_w.data + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' actor frame + relative to the world. + """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + # read data from simulation + velocities = self.body_com_vel_w.clone() + # adjust linear velocity to link from center of mass + velocities[..., :3] += torch.linalg.cross( + velocities[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 + ) + # set the buffer data and timestamp + self._body_link_vel_w.data = velocities + self._body_link_vel_w.timestamp = self._sim_timestamp + + return self._body_link_vel_w.data + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_bodies, 7). + + This quantity is the pose of the center of mass frame of the articulation links relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b + ) + # set the buffer data and timestamp + self._body_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._body_com_pose_w.timestamp = self._sim_timestamp + + return self._body_com_pose_w.data + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 6). + + This quantity contains the linear and angular velocities of the articulation links' center of mass frame + relative to the world. + """ + if self._body_com_vel_w.timestamp < self._sim_timestamp: + self._body_com_vel_w.data = self._root_physx_view.get_link_velocities() + self._body_com_vel_w.timestamp = self._sim_timestamp + + return self._body_com_vel_w.data + + @property + def body_state_w(self): + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position and quaternion are of all the articulation links' actor frame. 
Meanwhile, the linear and angular + velocities are of the articulation links's center of mass frame. + """ + if self._body_state_w.timestamp < self._sim_timestamp: + self._body_state_w.data = torch.cat((self.body_link_pose_w, self.body_com_vel_w), dim=-1) + self._body_state_w.timestamp = self._sim_timestamp + + return self._body_state_w.data + + @property + def body_link_state_w(self): + """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + """ + if self._body_link_state_w.timestamp < self._sim_timestamp: + self._body_link_state_w.data = torch.cat((self.body_link_pose_w, self.body_link_vel_w), dim=-1) + self._body_link_state_w.timestamp = self._sim_timestamp + + return self._body_link_state_w.data + + @property + def body_com_state_w(self): + """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. + """ + if self._body_com_state_w.timestamp < self._sim_timestamp: + self._body_com_state_w.data = torch.cat((self.body_com_pose_w, self.body_com_vel_w), dim=-1) + self._body_com_state_w.timestamp = self._sim_timestamp + + return self._body_com_state_w.data + + @property + def body_com_acc_w(self): + """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. + Shape is (num_instances, num_bodies, 6). + + All values are relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + # read data from simulation and set the buffer data and timestamp + self._body_com_acc_w.data = self._root_physx_view.get_link_accelerations() + self._body_com_acc_w.timestamp = self._sim_timestamp + + return self._body_com_acc_w.data + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_physx_view.get_coms().to(self.device) + pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_com_pose_b.data = pose + self._body_com_pose_b.timestamp = self._sim_timestamp + + return self._body_com_pose_b.data + + @property + def body_incoming_joint_wrench_b(self) -> torch.Tensor: + """Joint reaction wrench applied from body parent to child body in parent body frame. + + Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the + world of an articulation. + + For more information on joint wrenches, please check the`PhysX documentation`_ and the underlying + `PhysX Tensor API`_. + + .. _`PhysX documentation`: https://nvidia-omniverse.github.io/PhysX/physx/5.5.1/docs/Articulations.html#link-incoming-joint-force + .. 
_`PhysX Tensor API`: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.get_link_incoming_joint_force
+ """
+
+ if self._body_incoming_joint_wrench_b.timestamp < self._sim_timestamp:
+ self._body_incoming_joint_wrench_b.data = self._root_physx_view.get_link_incoming_joint_force()
+ self._body_incoming_joint_wrench_b.timestamp = self._sim_timestamp
+ return self._body_incoming_joint_wrench_b.data
+
+ ##
+ # Joint state properties.
+ ##
+
+ @property
+ def joint_pos(self):
+ """Joint positions of all joints. Shape is (num_instances, num_joints)."""
+ if self._joint_pos.timestamp < self._sim_timestamp:
+ # read data from simulation and set the buffer data and timestamp
+ self._joint_pos.data = self._root_physx_view.get_dof_positions()
+ self._joint_pos.timestamp = self._sim_timestamp
+ return self._joint_pos.data
+
+ @property
+ def joint_vel(self):
+ """Joint velocities of all joints. Shape is (num_instances, num_joints)."""
+ if self._joint_vel.timestamp < self._sim_timestamp:
+ # read data from simulation and set the buffer data and timestamp
+ self._joint_vel.data = self._root_physx_view.get_dof_velocities()
+ self._joint_vel.timestamp = self._sim_timestamp
+ return self._joint_vel.data
+
+ @property
+ def joint_acc(self):
+ """Joint acceleration of all joints. Shape is (num_instances, num_joints)."""
+ if self._joint_acc.timestamp < self._sim_timestamp:
+ # note: we use finite differencing to compute acceleration
+ time_elapsed = self._sim_timestamp - self._joint_acc.timestamp
+ self._joint_acc.data = (self.joint_vel - self._previous_joint_vel) / time_elapsed
+ self._joint_acc.timestamp = self._sim_timestamp
+ # update the previous joint velocity
+ self._previous_joint_vel[:] = self.joint_vel
+ return self._joint_acc.data
+
+ ##
+ # Derived Properties.
+ ##
+
+ @property
+ def projected_gravity_b(self):
+ """Projection of the gravity direction on base frame. Shape is (num_instances, 3)."""
+ return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W)
+
+ @property
+ def heading_w(self):
+ """Yaw heading of the base frame (in radians). Shape is (num_instances,).
+
+ Note:
+ This quantity is computed by assuming that the forward-direction of the base
+ frame is along x-direction, i.e. :math:`(1, 0, 0)`.
+ """
+ forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B)
+ return torch.atan2(forward_w[:, 1], forward_w[:, 0])
+
+ @property
+ def root_link_lin_vel_b(self) -> torch.Tensor:
+ """Root link linear velocity in base frame. Shape is (num_instances, 3).
+
+ This quantity is the linear velocity of the articulation root's actor frame relative to the world,
+ expressed in the actor frame.
+ """
+ return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w)
+
+ @property
+ def root_link_ang_vel_b(self) -> torch.Tensor:
+ """Root link angular velocity in base frame. Shape is (num_instances, 3).
+
+ This quantity is the angular velocity of the articulation root's actor frame relative to the world,
+ expressed in the actor frame.
+ """
+ return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w)
+
+ @property
+ def root_com_lin_vel_b(self) -> torch.Tensor:
+ """Root center of mass linear velocity in base frame. Shape is (num_instances, 3).
+
+ This quantity is the linear velocity of the articulation root's center of mass frame relative to the world,
+ expressed in the actor frame.
+ """
+ return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w)
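+
+ # NOTE: The base-frame (``_b``) velocity properties above and below simply rotate the
+ # corresponding world-frame vectors into the root link frame using the inverse of the
+ # root link orientation. A minimal sketch of the same operation (illustrative only;
+ # ``data`` and ``v_w`` are hypothetical names, with ``v_w`` a (num_instances, 3)
+ # world-frame vector):
+ #
+ # v_b = math_utils.quat_apply_inverse(data.root_link_quat_w, v_w)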
+ """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) + + @property + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the articulation root's center of mass frame with respect to the + its actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) + + ## + # Sliced properties. + ## + + @property + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_pose_w[:, :3] + + @property + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + return self.root_link_pose_w[:, 3:7] + + @property + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_vel_w[:, 3:6] + + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, 3:6] + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). + + This quantity is the position of the articulation bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). + + This quantity is the orientation of the articulation bodies' actor frame relative to the world. 
+        """
+        return self.body_link_pose_w[..., 3:7]
+
+    @property
+    def body_link_lin_vel_w(self) -> torch.Tensor:
+        """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the linear velocity of the articulation bodies' actor frame relative to the world.
+        """
+        return self.body_link_vel_w[..., :3]
+
+    @property
+    def body_link_ang_vel_w(self) -> torch.Tensor:
+        """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the angular velocity of the articulation bodies' actor frame relative to the world.
+        """
+        return self.body_link_vel_w[..., 3:6]
+
+    @property
+    def body_com_pos_w(self) -> torch.Tensor:
+        """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the position of the articulation bodies' center of mass frame relative to the world.
+        """
+        return self.body_com_pose_w[..., :3]
+
+    @property
+    def body_com_quat_w(self) -> torch.Tensor:
+        """Orientation (w, x, y, z) of the principal axes of inertia of all bodies in simulation world frame.
+        Shape is (num_instances, num_bodies, 4).
+
+        This quantity is the orientation of the principal axes of inertia of the articulation bodies relative to
+        the world.
+        """
+        return self.body_com_pose_w[..., 3:7]
+
+    @property
+    def body_com_lin_vel_w(self) -> torch.Tensor:
+        """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the linear velocity of the articulation bodies' center of mass frame.
+        """
+        return self.body_com_vel_w[..., :3]
+
+    @property
+    def body_com_ang_vel_w(self) -> torch.Tensor:
+        """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the angular velocity of the articulation bodies' center of mass frame.
+        """
+        return self.body_com_vel_w[..., 3:6]
+
+    @property
+    def body_com_lin_acc_w(self) -> torch.Tensor:
+        """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the linear acceleration of the articulation bodies' center of mass frame.
+        """
+        return self.body_com_acc_w[..., :3]
+
+    @property
+    def body_com_ang_acc_w(self) -> torch.Tensor:
+        """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the angular acceleration of the articulation bodies' center of mass frame.
+        """
+        return self.body_com_acc_w[..., 3:6]
+
+    @property
+    def body_com_pos_b(self) -> torch.Tensor:
+        """Center of mass position of all of the bodies in their respective link frames.
+        Shape is (num_instances, num_bodies, 3).
+
+        This quantity is the center of mass location relative to its body's link frame.
+        """
+        return self.body_com_pose_b[..., :3]
+
+    @property
+    def body_com_quat_b(self) -> torch.Tensor:
+        """Orientation (w, x, y, z) of the principal axes of inertia of all of the bodies in their
+        respective link frames. Shape is (num_instances, num_bodies, 4).
+
+        This quantity is the orientation of the principal axes of inertia relative to its body's link frame.
+        """
+        return self.body_com_pose_b[..., 3:7]
+
+    ##
+    # Backward compatibility.
+ ## + + @property + def root_pose_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pose_w`.""" + return self.root_link_pose_w + + @property + def root_pos_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pos_w`.""" + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Same as :attr:`root_link_quat_w`.""" + return self.root_link_quat_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_vel_w`.""" + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_w`.""" + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_w`.""" + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_b`.""" + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_b`.""" + return self.root_com_ang_vel_b + + @property + def body_pose_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pose_w`.""" + return self.body_link_pose_w + + @property + def body_pos_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pos_w`.""" + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Same as :attr:`body_link_quat_w`.""" + return self.body_link_quat_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_vel_w`.""" + return self.body_com_vel_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_vel_w`.""" + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_vel_w`.""" + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_acc_w`.""" + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_acc_w`.""" + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_acc_w`.""" + return self.body_com_ang_acc_w + + @property + def com_pos_b(self) -> torch.Tensor: + """Same as :attr:`body_com_pos_b`.""" + return self.body_com_pos_b + + @property + def com_quat_b(self) -> torch.Tensor: + """Same as :attr:`body_com_quat_b`.""" + return self.body_com_quat_b + + @property + def joint_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" + logger.warning( + "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead." + ) + return self.joint_pos_limits + + @property + def default_joint_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" + logger.warning( + "The `default_joint_limits` property will be deprecated in a future release. Please use" + " `default_joint_pos_limits` instead." + ) + return self.default_joint_pos_limits + + @property + def joint_velocity_limits(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" + logger.warning( + "The `joint_velocity_limits` property will be deprecated in a future release. Please use" + " `joint_vel_limits` instead." + ) + return self.joint_vel_limits + + @property + def joint_friction(self) -> torch.Tensor: + """Deprecated property. 
Please use :attr:`joint_friction_coeff` instead.""" + logger.warning( + "The `joint_friction` property will be deprecated in a future release. Please use" + " `joint_friction_coeff` instead." + ) + return self.joint_friction_coeff + + @property + def default_joint_friction(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" + logger.warning( + "The `default_joint_friction` property will be deprecated in a future release. Please use" + " `default_joint_friction_coeff` instead." + ) + return self.default_joint_friction_coeff + + @property + def fixed_tendon_limit(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" + logger.warning( + "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" + " `fixed_tendon_pos_limits` instead." + ) + return self.fixed_tendon_pos_limits + + @property + def default_fixed_tendon_limit(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" + logger.warning( + "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" + " `default_fixed_tendon_pos_limits` instead." + ) + return self.default_fixed_tendon_pos_limits diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py new file mode 100644 index 00000000000..ee6b6c04fae --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py @@ -0,0 +1,2143 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Flag for pyright to ignore type errors in this file. +# pyright: reportPrivateUsage=false + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp +from prettytable import PrettyTable + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import PhysxSchema, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator +from isaaclab.utils.types import ArticulationActions +from isaaclab.utils.version import get_isaac_sim_version +from isaaclab.utils.wrench_composer import WrenchComposer + +from ..asset_base import AssetBase +from .articulation_data import ArticulationData + +if TYPE_CHECKING: + from .articulation_cfg import ArticulationCfg + +# import logger +logger = logging.getLogger(__name__) + + +class Articulation(AssetBase): + """An articulation asset class. + + An articulation is a collection of rigid bodies connected by joints. The joints can be either + fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. + However, the articulation class has currently been tested with revolute and prismatic joints. + The class supports both floating-base and fixed-base articulations. The type of articulation + is determined based on the root joint of the articulation. If the root joint is fixed, then + the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base + system. 
This can be checked using the :attr:`Articulation.is_fixed_base` attribute. + + For an asset to be considered an articulation, the root prim of the asset must have the + `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using + the reduced coordinate formulation. On playing the simulation, the physics engine parses the + articulation root prim and creates the corresponding articulation in the physics engine. The + articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. + + The articulation class also provides the functionality to augment the simulation of an articulated + system with custom actuator models. These models can either be explicit or implicit, as detailed in + the :mod:`isaaclab.actuators` module. The actuator models are specified using the + :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the + corresponding actuator models, when the simulation is played. + + During the simulation step, the articulation class first applies the actuator models to compute + the joint commands based on the user-specified targets. These joint commands are then applied + into the simulation. The joint commands can be either position, velocity, or effort commands. + As an example, the following snippet shows how this can be used for position commands: + + .. code-block:: python + + # an example instance of the articulation class + my_articulation = Articulation(cfg) + + # set joint position targets + my_articulation.set_joint_position_target(position) + # propagate the actuator models and apply the computed commands into the simulation + my_articulation.write_data_to_sim() + + # step the simulation using the simulation context + sim_context.step() + + # update the articulation state, where dt is the simulation time step + my_articulation.update(dt) + + .. _`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html + + """ + + cfg: ArticulationCfg + """Configuration instance for the articulations.""" + + actuators: dict[str, ActuatorBase] + """Dictionary of actuator instances for the articulation. + + The keys are the actuator names and the values are the actuator instances. The actuator instances + are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` + attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. + """ + + def __init__(self, cfg: ArticulationCfg): + """Initialize the articulation. + + Args: + cfg: A configuration instance. 
+ """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> ArticulationData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_physx_view.count + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation is a fixed-base or floating-base system.""" + return self.root_physx_view.shared_metatype.fixed_base + + @property + def num_joints(self) -> int: + """Number of joints in articulation.""" + return self.root_physx_view.shared_metatype.dof_count + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons in articulation.""" + return self.root_physx_view.max_fixed_tendons + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons in articulation.""" + return self.root_physx_view.max_spatial_tendons + + @property + def num_bodies(self) -> int: + """Number of bodies in articulation.""" + return self.root_physx_view.shared_metatype.link_count + + @property + def joint_names(self) -> list[str]: + """Ordered names of joints in articulation.""" + return self.root_physx_view.shared_metatype.dof_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered names of fixed tendons in articulation.""" + return self._fixed_tendon_names + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered names of spatial tendons in articulation.""" + return self._spatial_tendon_names + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in articulation.""" + return self.root_physx_view.shared_metatype.link_names + + @property + def root_physx_view(self) -> physx.ArticulationView: + """Articulation view for the asset (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_physx_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + + Note: + Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are + applied to the simulation. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + + Note: + Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are + applied to the simulation. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # use ellipses object to skip initial indices. + if env_ids is None: + env_ids = slice(None) + # reset actuators + for actuator in self.actuators.values(): + actuator.reset(env_ids) + # reset external wrenches. 
+ self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self): + """Write external wrenches and joint commands to the simulation. + + If any explicit actuators are present, then the actuator models are used to compute the + joint commands. Otherwise, the joint commands are directly set into the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_BODY_INDICES_WP, + env_ids=self._ALL_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + # apply actuator models + self._apply_actuator_model() + # write actions into simulation + self.root_physx_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES) + # position and velocity targets only for implicit actuators + if self._has_implicit_actuators: + self.root_physx_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES) + self.root_physx_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES) + + def update(self, dt: float): + self._data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + def find_joints( + self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find joints in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint names. 
+ joint_subset: A subset of joints to search for. Defaults to None, which means all joints + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the joint indices and names. + """ + if joint_subset is None: + joint_subset = self.joint_names + # find joints + return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order) + + def find_fixed_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find fixed tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the joint + names with fixed tendons. + tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means + all joints in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + # tendons follow the joint names they are attached to + tendon_subsets = self.fixed_tendon_names + # find tendons + return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + def find_spatial_tendons( + self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find spatial tendons in the articulation based on the name keys. + + Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information + on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the tendon names. + tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons + in the articulation are searched. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the tendon indices and names. + """ + if tendon_subsets is None: + tendon_subsets = self.spatial_tendon_names + # find tendons + return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + + """ + Operations - State Writers. + """ + + def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. 
All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) + + def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self._data.root_link_pose_w[env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._root_link_state_w.data is not None: + self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] + if self._data._root_state_w.data is not None: + self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] + + # convert root quaternion from wxyz to xyzw + root_poses_xyzw = self._data.root_link_pose_w.clone() + root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") + + # Need to invalidate the buffer to trigger the update with the new state. + self._data._body_link_pose_w.timestamp = -1.0 + self._data._body_com_pose_w.timestamp = -1.0 + self._data._body_state_w.timestamp = -1.0 + self._data._body_link_state_w.timestamp = -1.0 + self._data._body_com_state_w.timestamp = -1.0 + + # set into simulation + self.root_physx_view.set_root_transforms(root_poses_xyzw, indices=physx_env_ids) + + def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). 
+        The orientation is the orientation of the principal axes of inertia.
+
+        Args:
+            root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        # resolve all indices
+        if env_ids is None:
+            local_env_ids = slice(env_ids)
+        else:
+            local_env_ids = env_ids
+
+        # set into internal buffers
+        self._data.root_com_pose_w[local_env_ids] = root_pose.clone()
+        # update these buffers only if the user is using them. Otherwise this adds to overhead.
+        if self._data._root_com_state_w.data is not None:
+            self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids]
+
+        # get CoM pose in link frame
+        com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :]
+        com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :]
+        # transform input CoM pose to link frame
+        root_link_pos, root_link_quat = math_utils.combine_frame_transforms(
+            root_pose[..., :3],
+            root_pose[..., 3:7],
+            math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b),
+            math_utils.quat_inv(com_quat_b),
+        )
+        root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1)
+
+        # write transformed pose in link frame to sim
+        self.write_root_link_pose_to_sim(root_pose=root_link_pose, env_ids=env_ids)
+
+    def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+        NOTE: This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids)
+
+    def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
+        """Set the root center of mass velocity over selected environment indices into the simulation.
+
+        The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
+        NOTE: This sets the velocity of the root's center of mass rather than the root's frame.
+
+        Args:
+            root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
+            env_ids: Environment indices. If None, then all indices are used.
+        """
+        # resolve all indices
+        physx_env_ids = env_ids
+        if env_ids is None:
+            env_ids = slice(None)
+            physx_env_ids = self._ALL_INDICES
+
+        # note: we need to do this here since tensors are not set into simulation until step.
+        # set into internal buffers
+        self._data.root_com_vel_w[env_ids] = root_velocity.clone()
+        # update these buffers only if the user is using them. Otherwise this adds to overhead.
+ if self._data._root_com_state_w.data is not None: + self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] + if self._data._root_state_w.data is not None: + self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] + # make the acceleration zero to prevent reporting old values + self._data.body_acc_w[env_ids] = 0.0 + + # set into simulation + self.root_physx_view.set_root_velocities(self._data.root_com_vel_w, indices=physx_env_ids) + + def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's frame rather than the roots center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + local_env_ids = slice(env_ids) + else: + local_env_ids = env_ids + + # set into internal buffers + self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._root_link_state_w.data is not None: + self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] + + # get CoM pose in link frame + quat = self.data.root_link_quat_w[local_env_ids] + com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] + # transform input velocity to center of mass frame + root_com_velocity = root_velocity.clone() + root_com_velocity[:, :3] += torch.linalg.cross( + root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 + ) + + # write transformed velocity in CoM frame to sim + self.write_root_com_velocity_to_sim(root_velocity=root_com_velocity, env_ids=env_ids) + + def write_joint_state_to_sim( + self, + position: torch.Tensor, + velocity: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | slice | None = None, + ): + """Write joint positions and velocities to the simulation. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)). + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # set into simulation + self.write_joint_position_to_sim(position, joint_ids=joint_ids, env_ids=env_ids) + self.write_joint_velocity_to_sim(velocity, joint_ids=joint_ids, env_ids=env_ids) + + def write_joint_position_to_sim( + self, + position: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | slice | None = None, + ): + """Write joint positions to the simulation. + + Args: + position: Joint positions. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). 
+ """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_pos[env_ids, joint_ids] = position + # Need to invalidate the buffer to trigger the update with the new root pose. + self._data._body_com_vel_w.timestamp = -1.0 + self._data._body_link_vel_w.timestamp = -1.0 + self._data._body_com_pose_b.timestamp = -1.0 + self._data._body_com_pose_w.timestamp = -1.0 + self._data._body_link_pose_w.timestamp = -1.0 + + self._data._body_state_w.timestamp = -1.0 + self._data._body_link_state_w.timestamp = -1.0 + self._data._body_com_state_w.timestamp = -1.0 + # set into simulation + self.root_physx_view.set_dof_positions(self._data.joint_pos, indices=physx_env_ids) + + def write_joint_velocity_to_sim( + self, + velocity: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | slice | None = None, + ): + """Write joint velocities to the simulation. + + Args: + velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_vel[env_ids, joint_ids] = velocity + self._data._previous_joint_vel[env_ids, joint_ids] = velocity + self._data.joint_acc[env_ids, joint_ids] = 0.0 + # set into simulation + self.root_physx_view.set_dof_velocities(self._data.joint_vel, indices=physx_env_ids) + + """ + Operations - Simulation Parameters Writers. + """ + + def write_joint_stiffness_to_sim( + self, + stiffness: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint stiffness into the simulation. + + Args: + stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + # note: This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_stiffness[env_ids, joint_ids] = stiffness + # set into simulation + self.root_physx_view.set_dof_stiffnesses(self._data.joint_stiffness.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_damping_to_sim( + self, + damping: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint damping into the simulation. + + Args: + damping: Joint damping. 
Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the damping for. Defaults to None (all joints). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + # note: This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_damping[env_ids, joint_ids] = damping + # set into simulation + self.root_physx_view.set_dof_dampings(self._data.joint_damping.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_position_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ): + """Write joint position limits into the simulation. + + Args: + limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2). + joint_ids: The joint indices to set the limits for. Defaults to None (all joints). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). + warn_limit_violation: Whether to use warning or info level logging when default joint positions + exceed the new limits. Defaults to True. + """ + # note: This function isn't setting the values for actuator models. (#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_pos_limits[env_ids, joint_ids] = limits + # update default joint pos to stay within the new limits + if torch.any( + (self._data.default_joint_pos[env_ids, joint_ids] < limits[..., 0]) + | (self._data.default_joint_pos[env_ids, joint_ids] > limits[..., 1]) + ): + self._data.default_joint_pos[env_ids, joint_ids] = torch.clamp( + self._data.default_joint_pos[env_ids, joint_ids], limits[..., 0], limits[..., 1] + ) + violation_message = ( + "Some default joint positions are outside of the range of the new joint limits. Default joint positions" + " will be clamped to be within the new joint limits." + ) + if warn_limit_violation: + # warn level will show in console + logger.warning(violation_message) + else: + # info level is only written to log file + logger.info(violation_message) + # set into simulation + self.root_physx_view.set_dof_limits(self._data.joint_pos_limits.cpu(), indices=physx_env_ids.cpu()) + + # compute the soft limits based on the joint limits + # TODO: Optimize this computation for only selected joints + # soft joint position limits (recommended not to be too close to limits). 
+ joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 + joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] + soft_limit_factor = self.cfg.soft_joint_pos_limit_factor + # add to data + self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor + self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor + + def write_joint_velocity_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint max velocity to the simulation. + + The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only + be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving + faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. + + Args: + limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the max velocity for. Defaults to None (all joints). + env_ids: The environment indices to set the max velocity for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # move tensor to cpu if needed + if isinstance(limits, torch.Tensor): + limits = limits.to(self.device) + # set into internal buffers + self._data.joint_vel_limits[env_ids, joint_ids] = limits + # set into simulation + self.root_physx_view.set_dof_max_velocities(self._data.joint_vel_limits.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_effort_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint effort limits into the simulation. + + The effort limit is used to constrain the computed joint efforts in the physics engine. If the + computed effort exceeds this limit, the physics engine will clip the effort to this value. + + Args: + limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). + env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). + """ + # note: This function isn't setting the values for actuator models. 
(#128) + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # move tensor to cpu if needed + if isinstance(limits, torch.Tensor): + limits = limits.to(self.device) + # set into internal buffers + self._data.joint_effort_limits[env_ids, joint_ids] = limits + # set into simulation + self.root_physx_view.set_dof_max_forces(self._data.joint_effort_limits.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_armature_to_sim( + self, + armature: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint armature into the simulation. + + The armature is directly added to the corresponding joint-space inertia. It helps improve the + simulation stability by reducing the joint velocities. + + Args: + armature: Joint armature. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). + env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_armature[env_ids, joint_ids] = armature + # set into simulation + self.root_physx_view.set_dof_armatures(self._data.joint_armature.cpu(), indices=physx_env_ids.cpu()) + + def write_joint_friction_coefficient_to_sim( + self, + joint_friction_coeff: torch.Tensor | float, + joint_dynamic_friction_coeff: torch.Tensor | float | None = None, + joint_viscous_friction_coeff: torch.Tensor | float | None = None, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + r"""Write joint friction coefficients into the simulation. + + For Isaac Sim versions below 5.0, only the static friction coefficient is set. + This limits the resisting force or torque up to a maximum proportional to the transmitted + spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. + + For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients + are set. The model combines Coulomb (static & dynamic) friction with a viscous term: + + - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. + - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. + - Viscous friction :math:`c_v` is a velocity-proportional resistive term. + + Args: + joint_friction_coeff: Static friction coefficient :math:`\mu_s`. + Shape is (len(env_ids), len(joint_ids)). Scalars are broadcast to all selections. + joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. + Same shape as above. If None, the dynamic coefficient is not updated. + joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. + Same shape as above. If None, the viscous coefficient is not updated. + joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints). 
+ env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_friction_coeff[env_ids, joint_ids] = joint_friction_coeff + + # if dynamic or viscous friction coeffs are provided, set them too + if joint_dynamic_friction_coeff is not None: + self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff + if joint_viscous_friction_coeff is not None: + self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff + + # move the indices to cpu + physx_envs_ids_cpu = physx_env_ids.cpu() + + # set into simulation + if get_isaac_sim_version().major < 5: + self.root_physx_view.set_dof_friction_coefficients( + self._data.joint_friction_coeff.cpu(), indices=physx_envs_ids_cpu + ) + else: + friction_props = self.root_physx_view.get_dof_friction_properties() + friction_props[physx_envs_ids_cpu, :, 0] = self._data.joint_friction_coeff[physx_envs_ids_cpu, :].cpu() + + # only set dynamic and viscous friction if provided + if joint_dynamic_friction_coeff is not None: + friction_props[physx_envs_ids_cpu, :, 1] = self._data.joint_dynamic_friction_coeff[ + physx_envs_ids_cpu, : + ].cpu() + + # only set viscous friction if provided + if joint_viscous_friction_coeff is not None: + friction_props[physx_envs_ids_cpu, :, 2] = self._data.joint_viscous_friction_coeff[ + physx_envs_ids_cpu, : + ].cpu() + + self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_envs_ids_cpu) + + def write_joint_dynamic_friction_coefficient_to_sim( + self, + joint_dynamic_friction_coeff: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + if get_isaac_sim_version().major < 5: + logger.warning("Setting joint dynamic friction coefficients are not supported in Isaac Sim < 5.0") + return + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff + # set into simulation + friction_props = self.root_physx_view.get_dof_friction_properties() + friction_props[physx_env_ids.cpu(), :, 1] = self._data.joint_dynamic_friction_coeff[physx_env_ids, :].cpu() + self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) + + def write_joint_viscous_friction_coefficient_to_sim( + self, + joint_viscous_friction_coeff: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + if get_isaac_sim_version().major < 5: + logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") + return + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if 
needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff + # set into simulation + friction_props = self.root_physx_view.get_dof_friction_properties() + friction_props[physx_env_ids.cpu(), :, 2] = self._data.joint_viscous_friction_coeff[physx_env_ids, :].cpu() + self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) + + """ + Operations - Setters. + """ + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ): + """Set external force and torque to apply on the asset's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the articulations' bodies. + """ + logger.warning( + "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" + " use 'permanent_wrench_composer.set_forces_and_torques' instead." + ) + if forces is None and torques is None: + logger.warning("No forces or torques provided. 
No permanent external wrench will be applied.") + + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_INDICES_WP + elif not isinstance(env_ids, torch.Tensor): + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + # -- body_ids + if body_ids is None: + body_ids = self._ALL_BODY_INDICES_WP + elif isinstance(body_ids, slice): + body_ids = wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 + ) + elif not isinstance(body_ids, torch.Tensor): + body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) + else: + body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) + + # Write to wrench composer + self._permanent_wrench_composer.set_forces_and_torques( + forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, + torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, + positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, + body_ids=body_ids, + env_ids=env_ids, + is_global=is_global, + ) + + def set_joint_position_target( + self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None + ): + """Set joint position targets into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self._data.joint_pos_target[env_ids, joint_ids] = target + + def set_joint_velocity_target( + self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None + ): + """Set joint velocity targets into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self._data.joint_vel_target[env_ids, joint_ids] = target + + def set_joint_effort_target( + self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None + ): + """Set joint efforts into internal buffers. + + This function does not apply the joint targets to the simulation. It only fills the buffers with + the desired values. 
To apply the joint targets, call the :meth:`write_data_to_sim` function. + + Args: + target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)). + joint_ids: The joint indices to set the targets for. Defaults to None (all joints). + env_ids: The environment indices to set the targets for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and joint_ids != slice(None): + env_ids = env_ids[:, None] + # set targets + self._data.joint_effort_target[env_ids, joint_ids] = target + + """ + Operations - Tendons. + """ + + def set_fixed_tendon_stiffness( + self, + stiffness: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim` method. + + Args: + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set stiffness + self._data.fixed_tendon_stiffness[env_ids, fixed_tendon_ids] = stiffness + + def set_fixed_tendon_damping( + self, + damping: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon damping into internal buffers. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function. + + Args: + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the damping for. Defaults to None (all environments). + """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set damping + self._data.fixed_tendon_damping[env_ids, fixed_tendon_ids] = damping + + def set_fixed_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon limit stiffness efforts into internal buffers. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_fixed_tendon_properties_to_sim` method. + + Args: + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). + fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). 
+ env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments).
+ """
+ # resolve indices
+ if env_ids is None:
+ env_ids = slice(None)
+ if fixed_tendon_ids is None:
+ fixed_tendon_ids = slice(None)
+ if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+ env_ids = env_ids[:, None]
+ # set limit_stiffness
+ self._data.fixed_tendon_limit_stiffness[env_ids, fixed_tendon_ids] = limit_stiffness
+
+ def set_fixed_tendon_position_limit(
+ self,
+ limit: torch.Tensor,
+ fixed_tendon_ids: Sequence[int] | slice | None = None,
+ env_ids: Sequence[int] | None = None,
+ ):
+ """Set fixed tendon position limits into internal buffers.
+
+ This function does not apply the tendon limit to the simulation. It only fills the buffers with
+ the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function.
+
+ Args:
+ limit: Fixed tendon position limit. Shape is (len(env_ids), len(fixed_tendon_ids)).
+ fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons).
+ env_ids: The environment indices to set the limit for. Defaults to None (all environments).
+ """
+ # resolve indices
+ if env_ids is None:
+ env_ids = slice(None)
+ if fixed_tendon_ids is None:
+ fixed_tendon_ids = slice(None)
+ if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+ env_ids = env_ids[:, None]
+ # set limit
+ self._data.fixed_tendon_pos_limits[env_ids, fixed_tendon_ids] = limit
+
+ def set_fixed_tendon_rest_length(
+ self,
+ rest_length: torch.Tensor,
+ fixed_tendon_ids: Sequence[int] | slice | None = None,
+ env_ids: Sequence[int] | None = None,
+ ):
+ """Set fixed tendon rest lengths into internal buffers.
+
+ This function does not apply the tendon rest length to the simulation. It only fills the buffers with
+ the desired values. To apply the tendon rest length, call the
+ :meth:`write_fixed_tendon_properties_to_sim` method.
+
+ Args:
+ rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)).
+ fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons).
+ env_ids: The environment indices to set the rest length for. Defaults to None (all environments).
+ """
+ # resolve indices
+ if env_ids is None:
+ env_ids = slice(None)
+ if fixed_tendon_ids is None:
+ fixed_tendon_ids = slice(None)
+ if env_ids != slice(None) and fixed_tendon_ids != slice(None):
+ env_ids = env_ids[:, None]
+ # set rest_length
+ self._data.fixed_tendon_rest_length[env_ids, fixed_tendon_ids] = rest_length
+
+ def set_fixed_tendon_offset(
+ self,
+ offset: torch.Tensor,
+ fixed_tendon_ids: Sequence[int] | slice | None = None,
+ env_ids: Sequence[int] | None = None,
+ ):
+ """Set fixed tendon offsets into internal buffers.
+
+ This function does not apply the tendon offset to the simulation. It only fills the buffers with
+ the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function.
+
+ Args:
+ offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)).
+ fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons).
+ env_ids: The environment indices to set the offset for. Defaults to None (all environments).
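+
+ Example (illustrative sketch; assumes ``robot`` is an initialized :class:`Articulation` with at least two fixed tendons):
+
+ .. code-block:: python
+
+ # buffer offsets for the first two fixed tendons of every environment
+ offsets = torch.zeros(robot.num_instances, 2, device=robot.device)
+ robot.set_fixed_tendon_offset(offsets, fixed_tendon_ids=[0, 1])
+ # push the buffered tendon properties to the simulation
+ robot.write_fixed_tendon_properties_to_sim()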
+ """ + # resolve indices + if env_ids is None: + env_ids = slice(None) + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + if env_ids != slice(None) and fixed_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set offset + self._data.fixed_tendon_offset[env_ids, fixed_tendon_ids] = offset + + def write_fixed_tendon_properties_to_sim( + self, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write fixed tendon properties into the simulation. + + Args: + fixed_tendon_ids: The fixed tendon indices to set the limits for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limits for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + physx_env_ids = self._ALL_INDICES + if fixed_tendon_ids is None: + fixed_tendon_ids = slice(None) + + # set into simulation + self.root_physx_view.set_fixed_tendon_properties( + self._data.fixed_tendon_stiffness, + self._data.fixed_tendon_damping, + self._data.fixed_tendon_limit_stiffness, + self._data.fixed_tendon_pos_limits, + self._data.fixed_tendon_rest_length, + self._data.fixed_tendon_offset, + indices=physx_env_ids, + ) + + def set_spatial_tendon_stiffness( + self, + stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon stiffness into internal buffers. + + This function does not apply the tendon stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). + """ + if get_isaac_sim_version().major < 5: + logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." + ) + return + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set stiffness + self._data.spatial_tendon_stiffness[env_ids, spatial_tendon_ids] = stiffness + + def set_spatial_tendon_damping( + self, + damping: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon damping into internal buffers. + + This function does not apply the tendon damping to the simulation. It only fills the buffers with + the desired values. To apply the tendon damping, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None, + which means all spatial tendons. + env_ids: The environment indices to set the damping for. Defaults to None, which means all environments. + """ + if get_isaac_sim_version().major < 5: + logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." 
+ ) + return + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set damping + self._data.spatial_tendon_damping[env_ids, spatial_tendon_ids] = damping + + def set_spatial_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon limit stiffness into internal buffers. + + This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with + the desired values. To apply the tendon limit stiffness, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None, + which means all spatial tendons. + env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). + """ + if get_isaac_sim_version().major < 5: + logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." + ) + return + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set limit stiffness + self._data.spatial_tendon_limit_stiffness[env_ids, spatial_tendon_ids] = limit_stiffness + + def set_spatial_tendon_offset( + self, + offset: torch.Tensor, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set spatial tendon offset efforts into internal buffers. + + This function does not apply the tendon offset to the simulation. It only fills the buffers with + the desired values. To apply the tendon offset, call the + :meth:`write_spatial_tendon_properties_to_sim` method. + + Args: + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). + spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). + env_ids: The environment indices to set the offset for. Defaults to None (all environments). + """ + if get_isaac_sim_version().major < 5: + logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." + ) + return + # resolve indices + if env_ids is None: + env_ids = slice(None) + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + if env_ids != slice(None) and spatial_tendon_ids != slice(None): + env_ids = env_ids[:, None] + # set offset + self._data.spatial_tendon_offset[env_ids, spatial_tendon_ids] = offset + + def write_spatial_tendon_properties_to_sim( + self, + spatial_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write spatial tendon properties into the simulation. + + Args: + spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None, + which means all spatial tendons. + env_ids: The environment indices to set the properties for. Defaults to None, + which means all environments. 
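+
+ Example (illustrative sketch; assumes ``robot`` is an initialized :class:`Articulation` with spatial tendons, running on Isaac Sim >= 5.0):
+
+ .. code-block:: python
+
+ # buffer new gains for all spatial tendons in all environments
+ stiffness = 100.0 * torch.ones(robot.num_instances, robot.num_spatial_tendons, device=robot.device)
+ damping = 10.0 * torch.ones_like(stiffness)
+ robot.set_spatial_tendon_stiffness(stiffness)
+ robot.set_spatial_tendon_damping(damping)
+ # push the buffered tendon properties to the simulation
+ robot.write_spatial_tendon_properties_to_sim()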
+ """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + physx_env_ids = self._ALL_INDICES + if spatial_tendon_ids is None: + spatial_tendon_ids = slice(None) + + # set into simulation + self.root_physx_view.set_spatial_tendon_properties( + self._data.spatial_tendon_stiffness, + self._data.spatial_tendon_damping, + self._data.spatial_tendon_limit_stiffness, + self._data.spatial_tendon_offset, + indices=physx_env_ids, + ) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + + if self.cfg.articulation_root_prim_path is not None: + # The articulation root prim path is specified explicitly, so we can just use this. + root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path + else: + # No articulation root prim path was specified, so we need to search + # for it. We search for this in the first environment and then + # create a regex that matches all environments. + first_env_matching_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if first_env_matching_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString + + # Find all articulation root prims in the first environment. + first_env_root_prims = sim_utils.get_all_matching_child_prims( + first_env_matching_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(first_env_root_prims) == 0: + raise RuntimeError( + f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." + " Please ensure that the prim has 'USD ArticulationRootAPI' applied." + ) + if len(first_env_root_prims) > 1: + raise RuntimeError( + f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." + f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." + " Please ensure that there is only one articulation in the prim path tree." + ) + + # Now we convert the found articulation root from the first + # environment back into a regex that matches all environments. + first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString + root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] + root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path + + # -- articulation + self._root_physx_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) + + # check if the articulation was created + if self._root_physx_view._backend is None: + raise RuntimeError(f"Failed to create articulation at: {root_prim_path_expr}. 
Please check PhysX logs.") + + if get_isaac_sim_version().major < 5: + logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0: patching spatial-tendon getter" + " and setter to use dummy value" + ) + self._root_physx_view.max_spatial_tendons = 0 + self._root_physx_view.get_spatial_tendon_stiffnesses = lambda: torch.empty(0, device=self.device) + self._root_physx_view.get_spatial_tendon_dampings = lambda: torch.empty(0, device=self.device) + self._root_physx_view.get_spatial_tendon_limit_stiffnesses = lambda: torch.empty(0, device=self.device) + self._root_physx_view.get_spatial_tendon_offsets = lambda: torch.empty(0, device=self.device) + self._root_physx_view.set_spatial_tendon_properties = lambda *args, **kwargs: logger.warning( + "Spatial tendons are not supported in Isaac Sim < 5.0: Calling" + " set_spatial_tendon_properties has no effect" + ) + + # log information about the articulation + logger.info(f"Articulation initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") + logger.info(f"Is fixed root: {self.is_fixed_base}") + logger.info(f"Number of bodies: {self.num_bodies}") + logger.info(f"Body names: {self.body_names}") + logger.info(f"Number of joints: {self.num_joints}") + logger.info(f"Joint names: {self.joint_names}") + logger.info(f"Number of fixed tendons: {self.num_fixed_tendons}") + + # container for data access + self._data = ArticulationData(self.root_physx_view, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + self._process_actuators_cfg() + self._process_tendons() + # validate configuration + self._validate_cfg() + # update the robot data + self.update(0.0) + # log joint information + self._log_articulation_info() + + def _create_buffers(self): + # constants + self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) + self._ALL_BODY_INDICES = torch.arange(self.num_bodies, dtype=torch.long, device=self.device) + self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) + self._ALL_BODY_INDICES_WP = wp.from_torch(self._ALL_BODY_INDICES.to(torch.int32), dtype=wp.int32) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # asset named data + self._data.joint_names = self.joint_names + self._data.body_names = self.body_names + # tendon names are set in _process_tendons function + + # -- joint properties + self._data.default_joint_pos_limits = self.root_physx_view.get_dof_limits().to(self.device).clone() + self._data.default_joint_stiffness = self.root_physx_view.get_dof_stiffnesses().to(self.device).clone() + self._data.default_joint_damping = self.root_physx_view.get_dof_dampings().to(self.device).clone() + self._data.default_joint_armature = self.root_physx_view.get_dof_armatures().to(self.device).clone() + if get_isaac_sim_version().major < 5: + self._data.default_joint_friction_coeff = ( + self.root_physx_view.get_dof_friction_coefficients().to(self.device).clone() + ) + self._data.default_joint_dynamic_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) + self._data.default_joint_viscous_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) + else: + friction_props = self.root_physx_view.get_dof_friction_properties() + self._data.default_joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() + self._data.default_joint_dynamic_friction_coeff = 
friction_props[:, :, 1].to(self.device).clone() + self._data.default_joint_viscous_friction_coeff = friction_props[:, :, 2].to(self.device).clone() + + self._data.joint_pos_limits = self._data.default_joint_pos_limits.clone() + self._data.joint_vel_limits = self.root_physx_view.get_dof_max_velocities().to(self.device).clone() + self._data.joint_effort_limits = self.root_physx_view.get_dof_max_forces().to(self.device).clone() + self._data.joint_stiffness = self._data.default_joint_stiffness.clone() + self._data.joint_damping = self._data.default_joint_damping.clone() + self._data.joint_armature = self._data.default_joint_armature.clone() + self._data.joint_friction_coeff = self._data.default_joint_friction_coeff.clone() + self._data.joint_dynamic_friction_coeff = self._data.default_joint_dynamic_friction_coeff.clone() + self._data.joint_viscous_friction_coeff = self._data.default_joint_viscous_friction_coeff.clone() + + # -- body properties + self._data.default_mass = self.root_physx_view.get_masses().clone() + self._data.default_inertia = self.root_physx_view.get_inertias().clone() + + # -- joint commands (sent to the actuator from the user) + self._data.joint_pos_target = torch.zeros(self.num_instances, self.num_joints, device=self.device) + self._data.joint_vel_target = torch.zeros_like(self._data.joint_pos_target) + self._data.joint_effort_target = torch.zeros_like(self._data.joint_pos_target) + # -- joint commands (sent to the simulation after actuator processing) + self._joint_pos_target_sim = torch.zeros_like(self._data.joint_pos_target) + self._joint_vel_target_sim = torch.zeros_like(self._data.joint_pos_target) + self._joint_effort_target_sim = torch.zeros_like(self._data.joint_pos_target) + + # -- computed joint efforts from the actuator models + self._data.computed_torque = torch.zeros_like(self._data.joint_pos_target) + self._data.applied_torque = torch.zeros_like(self._data.joint_pos_target) + + # -- other data that are filled based on explicit actuator models + self._data.soft_joint_vel_limits = torch.zeros(self.num_instances, self.num_joints, device=self.device) + self._data.gear_ratio = torch.ones(self.num_instances, self.num_joints, device=self.device) + + # soft joint position limits (recommended not to be too close to limits). + joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 + joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] + soft_limit_factor = self.cfg.soft_joint_pos_limit_factor + # add to data + self._data.soft_joint_pos_limits = torch.zeros(self.num_instances, self.num_joints, 2, device=self.device) + self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor + self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- root state + # note: we cast to tuple to avoid torch/numpy type mismatch. 
+ default_root_state = ( + tuple(self.cfg.init_state.pos) + + tuple(self.cfg.init_state.rot) + + tuple(self.cfg.init_state.lin_vel) + + tuple(self.cfg.init_state.ang_vel) + ) + default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) + self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) + + # -- joint state + self._data.default_joint_pos = torch.zeros(self.num_instances, self.num_joints, device=self.device) + self._data.default_joint_vel = torch.zeros_like(self._data.default_joint_pos) + # joint pos + indices_list, _, values_list = string_utils.resolve_matching_names_values( + self.cfg.init_state.joint_pos, self.joint_names + ) + self._data.default_joint_pos[:, indices_list] = torch.tensor(values_list, device=self.device) + # joint vel + indices_list, _, values_list = string_utils.resolve_matching_names_values( + self.cfg.init_state.joint_vel, self.joint_names + ) + self._data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + self._root_physx_view = None + + """ + Internal helpers -- Actuators. + """ + + def _process_actuators_cfg(self): + """Process and apply articulation joint properties.""" + # create actuators + self.actuators = dict() + # flag for implicit actuators + # if this is false, we by-pass certain checks when doing actuator-related operations + self._has_implicit_actuators = False + + # iterate over all actuator configurations + for actuator_name, actuator_cfg in self.cfg.actuators.items(): + # type annotation for type checkers + actuator_cfg: ActuatorBaseCfg + # create actuator group + joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) + # check if any joints are found + if len(joint_names) == 0: + raise ValueError( + f"No joints found for actuator group: {actuator_name} with joint name expression:" + f" {actuator_cfg.joint_names_expr}." + ) + # resolve joint indices + # we pass a slice if all joints are selected to avoid indexing overhead + if len(joint_names) == self.num_joints: + joint_ids = slice(None) + else: + joint_ids = torch.tensor(joint_ids, device=self.device) + # create actuator collection + # note: for efficiency avoid indexing when over all indices + actuator: ActuatorBase = actuator_cfg.class_type( + cfg=actuator_cfg, + joint_names=joint_names, + joint_ids=joint_ids, + num_envs=self.num_instances, + device=self.device, + stiffness=self._data.default_joint_stiffness[:, joint_ids], + damping=self._data.default_joint_damping[:, joint_ids], + armature=self._data.default_joint_armature[:, joint_ids], + friction=self._data.default_joint_friction_coeff[:, joint_ids], + dynamic_friction=self._data.default_joint_dynamic_friction_coeff[:, joint_ids], + viscous_friction=self._data.default_joint_viscous_friction_coeff[:, joint_ids], + effort_limit=self._data.joint_effort_limits[:, joint_ids].clone(), + velocity_limit=self._data.joint_vel_limits[:, joint_ids], + ) + # log information on actuator groups + model_type = "implicit" if actuator.is_implicit_model else "explicit" + logger.info( + f"Actuator collection: {actuator_name} with model '{actuator_cfg.class_type.__name__}'" + f" (type: {model_type}) and joint names: {joint_names} [{joint_ids}]." 
+ ) + # store actuator group + self.actuators[actuator_name] = actuator + # set the passed gains and limits into the simulation + if isinstance(actuator, ImplicitActuator): + self._has_implicit_actuators = True + # the gains and limits are set into the simulation since actuator model is implicit + self.write_joint_stiffness_to_sim(actuator.stiffness, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim(actuator.damping, joint_ids=actuator.joint_indices) + else: + # the gains and limits are processed by the actuator model + # we set gains to zero, and torque limit to a high value in simulation to avoid any interference + self.write_joint_stiffness_to_sim(0.0, joint_ids=actuator.joint_indices) + self.write_joint_damping_to_sim(0.0, joint_ids=actuator.joint_indices) + + # Set common properties into the simulation + self.write_joint_effort_limit_to_sim(actuator.effort_limit_sim, joint_ids=actuator.joint_indices) + self.write_joint_velocity_limit_to_sim(actuator.velocity_limit_sim, joint_ids=actuator.joint_indices) + self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices) + self.write_joint_friction_coefficient_to_sim(actuator.friction, joint_ids=actuator.joint_indices) + if get_isaac_sim_version().major >= 5: + self.write_joint_dynamic_friction_coefficient_to_sim( + actuator.dynamic_friction, joint_ids=actuator.joint_indices + ) + self.write_joint_viscous_friction_coefficient_to_sim( + actuator.viscous_friction, joint_ids=actuator.joint_indices + ) + + # Store the configured values from the actuator model + # note: this is the value configured in the actuator model (for implicit and explicit actuators) + self._data.default_joint_stiffness[:, actuator.joint_indices] = actuator.stiffness + self._data.default_joint_damping[:, actuator.joint_indices] = actuator.damping + self._data.default_joint_armature[:, actuator.joint_indices] = actuator.armature + self._data.default_joint_friction_coeff[:, actuator.joint_indices] = actuator.friction + if get_isaac_sim_version().major >= 5: + self._data.default_joint_dynamic_friction_coeff[:, actuator.joint_indices] = actuator.dynamic_friction + self._data.default_joint_viscous_friction_coeff[:, actuator.joint_indices] = actuator.viscous_friction + + # perform some sanity checks to ensure actuators are prepared correctly + total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) + if total_act_joints != (self.num_joints - self.num_fixed_tendons): + logger.warning( + "Not all actuators are configured! Total number of actuated joints not equal to number of" + f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." 
+ )
+
+ if self.cfg.actuator_value_resolution_debug_print:
+ t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActuatorCfg Value", "Applied"])
+ for actuator_group, actuator in self.actuators.items():
+ group_count = 0
+ for property, resolution_details in actuator.joint_property_resolution_table.items():
+ for prop_idx, resolution_detail in enumerate(resolution_details):
+ actuator_group_str = actuator_group if group_count == 0 else ""
+ property_str = property if prop_idx == 0 else ""
+ fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail]
+ t.add_row([actuator_group_str, property_str, *fmt])
+ group_count += 1
+ logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}")
+
+ def _process_tendons(self):
+ """Process fixed and spatial tendons."""
+ # create a list to store the fixed tendon names
+ self._fixed_tendon_names = list()
+ self._spatial_tendon_names = list()
+ # parse fixed and spatial tendon properties if they exist
+ if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0:
+ joint_paths = self.root_physx_view.dof_paths[0]
+
+ # iterate over all joints to find tendons attached to them
+ for j in range(self.num_joints):
+ usd_joint_path = joint_paths[j]
+ # check whether joint has tendons - tendon name follows the joint name it is attached to
+ joint = UsdPhysics.Joint.Get(self.stage, usd_joint_path)
+ if joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAxisRootAPI):
+ joint_name = usd_joint_path.split("/")[-1]
+ self._fixed_tendon_names.append(joint_name)
+ elif joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAttachmentRootAPI) or joint.GetPrim().HasAPI(
+ PhysxSchema.PhysxTendonAttachmentLeafAPI
+ ):
+ joint_name = usd_joint_path.split("/")[-1]
+ self._spatial_tendon_names.append(joint_name)
+
+ # store the tendon names
+ self.data.fixed_tendon_names = self._fixed_tendon_names
+ self.data.spatial_tendon_names = self._spatial_tendon_names
+
+ def _apply_actuator_model(self):
+ """Processes joint commands for the articulation by forwarding them to the actuators.
+
+ The actions are first processed using actuator models. Depending on the robot configuration,
+ the actuator models compute the joint-level simulation commands and set them into the PhysX buffers.
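+
+ A minimal sketch of the intended flow (illustrative; ``robot`` is assumed to be an initialized
+ :class:`Articulation` and ``joint_pos_des`` a target tensor of shape (num_instances, num_joints)):
+
+ .. code-block:: python
+
+ # buffer the desired joint position targets
+ robot.set_joint_position_target(joint_pos_des)
+ # writing data to the simulation runs the actuator models on the buffered targets
+ robot.write_data_to_sim()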
+ """ + # process actions per group + for actuator in self.actuators.values(): + # prepare input for actuator model based on cached data + # TODO : A tensor dict would be nice to do the indexing of all tensors together + control_action = ArticulationActions( + joint_positions=self._data.joint_pos_target[:, actuator.joint_indices], + joint_velocities=self._data.joint_vel_target[:, actuator.joint_indices], + joint_efforts=self._data.joint_effort_target[:, actuator.joint_indices], + joint_indices=actuator.joint_indices, + ) + # compute joint command from the actuator model + control_action = actuator.compute( + control_action, + joint_pos=self._data.joint_pos[:, actuator.joint_indices], + joint_vel=self._data.joint_vel[:, actuator.joint_indices], + ) + # update targets (these are set into the simulation) + if control_action.joint_positions is not None: + self._joint_pos_target_sim[:, actuator.joint_indices] = control_action.joint_positions + if control_action.joint_velocities is not None: + self._joint_vel_target_sim[:, actuator.joint_indices] = control_action.joint_velocities + if control_action.joint_efforts is not None: + self._joint_effort_target_sim[:, actuator.joint_indices] = control_action.joint_efforts + # update state of the actuator model + # -- torques + self._data.computed_torque[:, actuator.joint_indices] = actuator.computed_effort + self._data.applied_torque[:, actuator.joint_indices] = actuator.applied_effort + # -- actuator data + self._data.soft_joint_vel_limits[:, actuator.joint_indices] = actuator.velocity_limit + # TODO: find a cleaner way to handle gear ratio. Only needed for variable gear ratio actuators. + if hasattr(actuator, "gear_ratio"): + self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio + + """ + Internal helpers -- Debugging. + """ + + def _validate_cfg(self): + """Validate the configuration after processing. + + Note: + This function should be called only after the configuration has been processed and the buffers have been + created. Otherwise, some settings that are altered during processing may not be validated. + For instance, the actuator models may change the joint max velocity limits. 
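+
+ As an illustration (hypothetical joint name and limit values), a configuration such as the following would
+ raise a ``ValueError`` here because the default joint position lies outside the joint limits read from
+ the simulation:
+
+ .. code-block:: python
+
+ # joint limits resolved from the USD asset: [-1.57, 1.57] rad (hypothetical)
+ init_state = ArticulationCfg.InitialStateCfg(joint_pos={"elbow_joint": 3.0})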
+ """ + # check that the default values are within the limits + joint_pos_limits = self.root_physx_view.get_dof_limits()[0].to(self.device) + out_of_range = self._data.default_joint_pos[0] < joint_pos_limits[:, 0] + out_of_range |= self._data.default_joint_pos[0] > joint_pos_limits[:, 1] + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + # throw error if any of the default joint positions are out of the limits + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default positions out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = joint_pos_limits[idx] + joint_pos = self.data.default_joint_pos[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + # check that the default joint velocities are within the limits + joint_max_vel = self.root_physx_view.get_dof_max_velocities()[0].to(self.device) + out_of_range = torch.abs(self._data.default_joint_vel[0]) > joint_max_vel + violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) + if len(violated_indices) > 0: + # prepare message for violated joints + msg = "The following joints have default velocities out of the limits: \n" + for idx in violated_indices: + joint_name = self.data.joint_names[idx] + joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] + joint_vel = self.data.default_joint_vel[0, idx] + # add to message + msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" + raise ValueError(msg) + + def _log_articulation_info(self): + """Log information about the articulation. + + Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. 
+ """ + + # define custom formatters for large numbers and limit ranges + def format_large_number(_, v: float) -> str: + """Format large numbers using scientific notation.""" + if abs(v) >= 1e3: + return f"{v:.1e}" + else: + return f"{v:.3f}" + + def format_limits(_, v: tuple[float, float]) -> str: + """Format limit ranges using scientific notation.""" + if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: + return f"[{v[0]:.1e}, {v[1]:.1e}]" + else: + return f"[{v[0]:.3f}, {v[1]:.3f}]" + + # read out all joint parameters from simulation + # -- gains + stiffnesses = self.root_physx_view.get_dof_stiffnesses()[0].tolist() + dampings = self.root_physx_view.get_dof_dampings()[0].tolist() + # -- properties + armatures = self.root_physx_view.get_dof_armatures()[0].tolist() + if get_isaac_sim_version().major < 5: + static_frictions = self.root_physx_view.get_dof_friction_coefficients()[0].tolist() + else: + friction_props = self.root_physx_view.get_dof_friction_properties() + static_frictions = friction_props[:, :, 0][0].tolist() + dynamic_frictions = friction_props[:, :, 1][0].tolist() + viscous_frictions = friction_props[:, :, 2][0].tolist() + # -- limits + position_limits = self.root_physx_view.get_dof_limits()[0].tolist() + velocity_limits = self.root_physx_view.get_dof_max_velocities()[0].tolist() + effort_limits = self.root_physx_view.get_dof_max_forces()[0].tolist() + # create table for term information + joint_table = PrettyTable() + joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" + # build field names based on Isaac Sim version + field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] + if get_isaac_sim_version().major < 5: + field_names.append("Static Friction") + else: + field_names.extend(["Static Friction", "Dynamic Friction", "Viscous Friction"]) + field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) + joint_table.field_names = field_names + + # apply custom formatters to numeric columns + joint_table.custom_format["Stiffness"] = format_large_number + joint_table.custom_format["Damping"] = format_large_number + joint_table.custom_format["Armature"] = format_large_number + joint_table.custom_format["Static Friction"] = format_large_number + if get_isaac_sim_version().major >= 5: + joint_table.custom_format["Dynamic Friction"] = format_large_number + joint_table.custom_format["Viscous Friction"] = format_large_number + joint_table.custom_format["Position Limits"] = format_limits + joint_table.custom_format["Velocity Limits"] = format_large_number + joint_table.custom_format["Effort Limits"] = format_large_number + + # set alignment of table columns + joint_table.align["Name"] = "l" + # add info on each term + for index, name in enumerate(self.joint_names): + # build row data based on Isaac Sim version + row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] + if get_isaac_sim_version().major < 5: + row_data.append(static_frictions[index]) + else: + row_data.extend([static_frictions[index], dynamic_frictions[index], viscous_frictions[index]]) + row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) + # add row to table + joint_table.add_row(row_data) + # convert table to string + logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) + + # read out all fixed tendon parameters from simulation + if self.num_fixed_tendons > 0: + # -- gains + ft_stiffnesses = self.root_physx_view.get_fixed_tendon_stiffnesses()[0].tolist() + 
ft_dampings = self.root_physx_view.get_fixed_tendon_dampings()[0].tolist() + # -- limits + ft_limit_stiffnesses = self.root_physx_view.get_fixed_tendon_limit_stiffnesses()[0].tolist() + ft_limits = self.root_physx_view.get_fixed_tendon_limits()[0].tolist() + ft_rest_lengths = self.root_physx_view.get_fixed_tendon_rest_lengths()[0].tolist() + ft_offsets = self.root_physx_view.get_fixed_tendon_offsets()[0].tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Limits", + "Rest Length", + "Offset", + ] + tendon_table.float_format = ".3" + + # apply custom formatters to tendon table columns + tendon_table.custom_format["Stiffness"] = format_large_number + tendon_table.custom_format["Damping"] = format_large_number + tendon_table.custom_format["Limit Stiffness"] = format_large_number + tendon_table.custom_format["Limits"] = format_limits + tendon_table.custom_format["Rest Length"] = format_large_number + tendon_table.custom_format["Offset"] = format_large_number + + # add info on each term + for index in range(self.num_fixed_tendons): + tendon_table.add_row( + [ + index, + ft_stiffnesses[index], + ft_dampings[index], + ft_limit_stiffnesses[index], + ft_limits[index], + ft_rest_lengths[index], + ft_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + if self.num_spatial_tendons > 0: + # -- gains + st_stiffnesses = self.root_physx_view.get_spatial_tendon_stiffnesses()[0].tolist() + st_dampings = self.root_physx_view.get_spatial_tendon_dampings()[0].tolist() + # -- limits + st_limit_stiffnesses = self.root_physx_view.get_spatial_tendon_limit_stiffnesses()[0].tolist() + st_offsets = self.root_physx_view.get_spatial_tendon_offsets()[0].tolist() + # create table for term information + tendon_table = PrettyTable() + tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" + tendon_table.field_names = [ + "Index", + "Stiffness", + "Damping", + "Limit Stiffness", + "Offset", + ] + tendon_table.float_format = ".3" + # add info on each term + for index in range(self.num_spatial_tendons): + tendon_table.add_row( + [ + index, + st_stiffnesses[index], + st_dampings[index], + st_limit_stiffnesses[index], + st_offsets[index], + ] + ) + # convert table to string + logger.info( + f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() + ) + + """ + Deprecated methods. + """ + + def write_joint_friction_to_sim( + self, + joint_friction: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Write joint friction coefficients into the simulation. + + .. deprecated:: 2.1.0 + Please use :meth:`write_joint_friction_coefficient_to_sim` instead. + """ + logger.warning( + "The function 'write_joint_friction_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_friction_coefficient_to_sim' instead." 
+ ) + self.write_joint_friction_coefficient_to_sim(joint_friction, joint_ids=joint_ids, env_ids=env_ids) + + def write_joint_limits_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ): + """Write joint limits into the simulation. + + .. deprecated:: 2.1.0 + Please use :meth:`write_joint_position_limit_to_sim` instead. + """ + logger.warning( + "The function 'write_joint_limits_to_sim' will be deprecated in a future release. Please" + " use 'write_joint_position_limit_to_sim' instead." + ) + self.write_joint_position_limit_to_sim( + limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=warn_limit_violation + ) + + def set_fixed_tendon_limit( + self, + limit: torch.Tensor, + fixed_tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set fixed tendon position limits into internal buffers. + + .. deprecated:: 2.1.0 + Please use :meth:`set_fixed_tendon_position_limit` instead. + """ + logger.warning( + "The function 'set_fixed_tendon_limit' will be deprecated in a future release. Please" + " use 'set_fixed_tendon_position_limit' instead." + ) + self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) diff --git a/source/isaaclab/isaaclab/assets/deformable_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py similarity index 81% rename from source/isaaclab/isaaclab/assets/deformable_object/__init__.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py index 503a238935b..e7d3faf6f09 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py @@ -8,3 +8,9 @@ from .deformable_object import DeformableObject from .deformable_object_cfg import DeformableObjectCfg from .deformable_object_data import DeformableObjectData + +__all__ = [ + "DeformableObject", + "DeformableObjectCfg", + "DeformableObjectData", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py similarity index 99% rename from source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 12ae6c51ad8..ea03f402a12 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -19,7 +19,7 @@ import isaaclab.utils.math as math_utils from isaaclab.markers import VisualizationMarkers -from ..asset_base import AssetBase +from isaaclab.assets.asset_base import AssetBase from .deformable_object_data import DeformableObjectData if TYPE_CHECKING: diff --git a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py similarity index 94% rename from source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py index 9ef06d1b54c..8d90de2549d 100644 --- a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object_cfg.py +++ 
b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py @@ -9,7 +9,7 @@ from isaaclab.markers.config import DEFORMABLE_TARGET_MARKER_CFG from isaaclab.utils import configclass -from ..asset_base_cfg import AssetBaseCfg +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from .deformable_object import DeformableObject diff --git a/source/isaaclab/isaaclab/assets/deformable_object/deformable_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py similarity index 100% rename from source/isaaclab/isaaclab/assets/deformable_object/deformable_object_data.py rename to source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py new file mode 100644 index 00000000000..3ea9cf62dd6 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid object assets.""" + +from .rigid_object import RigidObject +from .rigid_object_data import RigidObjectData + +__all__ = [ + "RigidObject", + "RigidObjectData", +] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py new file mode 100644 index 00000000000..0ed4515165e --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -0,0 +1,698 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.utils.wrench_composer import WrenchComposer +from isaaclab.assets.rigid_object.base_rigid_object import BaseRigidObject + +from .rigid_object_data import RigidObjectData + +if TYPE_CHECKING: + from isaaclab.assets.rigid_object.rigid_object_cfg import RigidObjectCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObject(BaseRigidObject): + """A rigid object asset class. + + Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects + such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. + + For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid body. On playing the + simulation, the physics engine will automatically register the rigid body and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + .. 
note:: + + For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view + class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), + we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes + in Isaac Sim perform additional USD-related operations which are slow and also not required. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "physx" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> RigidObjectData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count + + @property + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single rigid body. + """ + return 1 + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object.""" + prim_paths = self.root_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def root_view(self): + """Root view for the asset. + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None): + """Reset the rigid object. + + Args: + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + env_ids = slice(None) + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. 
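+
+ A typical usage pattern (illustrative sketch; ``rigid_object`` is assumed to be an initialized
+ :class:`RigidObject` and ``sim`` the active simulation context):
+
+ .. code-block:: python
+
+ # buffer an upward force of 10 N on the root body of every environment
+ forces = torch.zeros(rigid_object.num_instances, 1, 3, device=rigid_object.device)
+ forces[..., 2] = 10.0
+ rigid_object.set_external_force_and_torque(forces=forces, torques=torch.zeros_like(forces))
+ # push the buffered wrench to the simulation before stepping physics
+ rigid_object.write_data_to_sim()
+ sim.step()
+ rigid_object.update(sim.get_physics_dt())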
+ """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_BODY_INDICES_WP, + env_ids=self._ALL_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_view.apply_forces_and_torques_at_position( + force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_view.apply_forces_and_torques_at_position( + force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies in the rigid body based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + """ + Operations - Write to simulation. + """ + + def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. 
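+
+ Example (illustrative sketch; ``rigid_object`` is assumed to be an initialized :class:`RigidObject`):
+
+ .. code-block:: python
+
+ # root state layout: [pos (3), quat (w, x, y, z) (4), lin vel (3), ang vel (3)]
+ root_state = torch.zeros(rigid_object.num_instances, 13, device=rigid_object.device)
+ root_state[:, 3] = 1.0  # identity orientation
+ root_state[:, 2] = 1.0  # place the center of mass 1 m above the origin
+ rigid_object.write_root_com_state_to_sim(root_state)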
+ """ + self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) + + def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self.data.root_link_pose_w[env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._root_link_state_w.data is not None: + self.data.root_link_state_w[env_ids, :7] = self.data.root_link_pose_w[env_ids] + if self.data._root_state_w.data is not None: + self.data.root_state_w[env_ids, :7] = self.data.root_link_pose_w[env_ids] + if self.data._root_com_state_w.data is not None: + expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( + self.data.root_link_pose_w[env_ids, :3], + self.data.root_link_pose_w[env_ids, 3:7], + self.data.body_com_pos_b[env_ids, 0, :], + self.data.body_com_quat_b[env_ids, 0, :], + ) + self.data.root_com_state_w[env_ids, :3] = expected_com_pos + self.data.root_com_state_w[env_ids, 3:7] = expected_com_quat + # convert root quaternion from wxyz to xyzw + root_poses_xyzw = self.data.root_link_pose_w.clone() + root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") + # set into simulation + self.root_view.set_transforms(root_poses_xyzw, indices=physx_env_ids) + + def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + The orientation is the orientation of the principle axes of inertia. 
+ + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + local_env_ids = slice(env_ids) + else: + local_env_ids = env_ids + + # set into internal buffers + self.data.root_com_pose_w[local_env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._root_com_state_w.data is not None: + self.data.root_com_state_w[local_env_ids, :7] = self.data.root_com_pose_w[local_env_ids] + + # get CoM pose in link frame + com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] + com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :] + # transform input CoM pose to link frame + root_link_pos, root_link_quat = math_utils.combine_frame_transforms( + root_pose[..., :3], + root_pose[..., 3:7], + math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), + math_utils.quat_inv(com_quat_b), + ) + root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1) + + # write transformed pose in link frame to sim + self.write_root_link_pose_to_sim(root_link_pose, env_ids=env_ids) + + def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's center of mass rather than the roots frame. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's center of mass rather than the roots frame. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self.data.root_com_vel_w[env_ids] = root_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. 
+ if self.data._root_com_state_w.data is not None: + self.data.root_com_state_w[env_ids, 7:] = self.data.root_com_vel_w[env_ids] + if self.data._root_state_w.data is not None: + self.data.root_state_w[env_ids, 7:] = self.data.root_com_vel_w[env_ids] + if self.data._root_link_state_w.data is not None: + self.data.root_link_state_w[env_ids, 7:] = self.data.root_com_vel_w[env_ids] + # make the acceleration zero to prevent reporting old values + self.data.body_com_acc_w[env_ids] = 0.0 + # set into simulation + self.root_view.set_velocities(self.data.root_com_vel_w, indices=physx_env_ids) + + def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's frame rather than the roots center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + local_env_ids = slice(env_ids) + else: + local_env_ids = env_ids + + # set into internal buffers + self.data.root_link_vel_w[local_env_ids] = root_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._root_link_state_w.data is not None: + self.data.root_link_state_w[local_env_ids, 7:] = self.data.root_link_vel_w[local_env_ids] + + # get CoM pose in link frame + quat = self.data.root_link_quat_w[local_env_ids] + com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] + # transform input velocity to center of mass frame + root_com_velocity = root_velocity.clone() + root_com_velocity[:, :3] += torch.linalg.cross( + root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 + ) + + # write transformed velocity in CoM frame to sim + self.write_root_com_velocity_to_sim(root_com_velocity, env_ids=env_ids) + + + """ + Operations - Setters. + """ + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set masses of all bodies. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_mass[env_ids, body_ids] = masses + # set into simulation + self.root_view.set_masses(self.data.body_mass.cpu(), indices=physx_env_ids.cpu()) + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set center of mass positions of all bodies. + + Args: + coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). + body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass positions for. 
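The link-to-CoM velocity shift used in ``write_root_link_velocity_to_sim`` follows the rigid-body relation v_com = v_link + w x (R_wl * r_com), with the angular velocity unchanged. A minimal sketch of that step (the function name is illustrative; ``math_utils`` is ``isaaclab.utils.math``):

.. code-block:: python

    import torch
    import isaaclab.utils.math as math_utils

    def link_vel_to_com_vel(root_vel_w, link_quat_w, com_pos_b):
        com_vel = root_vel_w.clone()
        offset_w = math_utils.quat_apply(link_quat_w, com_pos_b)  # CoM offset in world frame
        com_vel[:, :3] += torch.linalg.cross(com_vel[:, 3:], offset_w, dim=-1)
        return com_vel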
Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_com_pose_b[env_ids, body_ids] = coms + # set into simulation + self.root_view.set_coms(self.data.body_com_pose_b.cpu(), indices=physx_env_ids.cpu()) + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set inertias of all bodies. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + # resolve indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + if body_ids is None: + body_ids = slice(None) + # broadcast env_ids if needed to allow double indexing + if env_ids != slice(None) and body_ids != slice(None): + env_ids = env_ids[:, None] + # set into internal buffers + self.data.body_inertia[env_ids, body_ids] = inertias + # set into simulation + self.root_view.set_inertias(self.data.body_inertia.cpu(), indices=physx_env_ids.cpu()) + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ) -> None: + """Set external force and torque to apply on the asset's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) + + .. caution:: + If the function is called consecutively with and with different values for ``is_global``, then the + all the external wrenches will be applied in the frame specified by the last call. + + .. code-block:: python + + # example of setting external wrench in the global frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[0], is_global=True) + # example of setting external wrench in the link frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[1], is_global=False) + # Both environments will have the external wrenches applied in the link frame + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. 
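The ``set_masses``, ``set_coms``, and ``set_inertias`` setters above are typically used for domain randomization. A hypothetical sketch (``obj`` names an initialized rigid object and is not defined in this patch):

.. code-block:: python

    # scale every body mass by a random factor in [0.8, 1.2] and write it back to the simulation
    scale = torch.empty_like(obj.data.body_mass).uniform_(0.8, 1.2)
    obj.set_masses(obj.data.body_mass * scale)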
Shape is (len(env_ids), len(body_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the bodies. + """ + logger.warning( + "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" + " use 'permanent_wrench_composer.set_forces_and_torques' instead." + ) + if forces is None and torques is None: + logger.warning("No forces or torques provided. No permanent external wrench will be applied.") + + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_INDICES_WP + elif not isinstance(env_ids, torch.Tensor): + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + # -- body_ids + body_ids = self._ALL_BODY_INDICES_WP + + # Write to wrench composer + self._permanent_wrench_composer.set_forces_and_torques( + forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, + torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, + positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, + body_ids=body_ids, + env_ids=env_ids, + is_global=is_global, + ) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" + f" located at: '{articulation_prims}' under '{template_prim_path}'. 
Please disable the articulation"
+ " root in the USD or from code by setting the parameter"
+ " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration."
+ )
+
+ # resolve root prim back into regex expression
+ root_prim_path = root_prims[0].GetPath().pathString
+ root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :]
+ # -- object view
+ self._root_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*"))
+
+ # check if the rigid body was created
+ if self.root_view._backend is None:
+ raise RuntimeError(f"Failed to create rigid body at: {self.cfg.prim_path}. Please check PhysX logs.")
+
+ # log information about the rigid body
+ logger.info(f"Rigid body initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.")
+ logger.info(f"Number of instances: {self.num_instances}")
+ logger.info(f"Number of bodies: {self.num_bodies}")
+ logger.info(f"Body names: {self.body_names}")
+
+ # container for data access
+ self._data = RigidObjectData(self.root_view, self.device)
+
+ # create buffers
+ self._create_buffers()
+ # process configuration
+ self._process_cfg()
+ # update the rigid body data
+ self.update(0.0)
+ # Let the rigid object data know that it is fully instantiated and ready to use.
+ self.data.is_primed = True
+
+ def _create_buffers(self):
+ """Create buffers for storing data."""
+ # constants
+ self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device)
+ self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32)
+ self._ALL_BODY_INDICES_WP = wp.from_torch(
+ torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32
+ )
+
+ # external wrench composer
+ self._instantaneous_wrench_composer = WrenchComposer(self)
+ self._permanent_wrench_composer = WrenchComposer(self)
+
+ # set information about rigid body into data
+ self._data.body_names = self.body_names
+
+ def _process_cfg(self) -> None:
+ """Post processing of configuration parameters."""
+ # default state
+ # -- root state
+ # note: we cast to tuple to avoid torch/numpy type mismatch.
+ default_root_pose = (
+ tuple(self.cfg.init_state.pos)
+ + tuple(self.cfg.init_state.rot)
+ )
+ default_root_vel = (
+ tuple(self.cfg.init_state.lin_vel)
+ + tuple(self.cfg.init_state.ang_vel)
+ )
+ default_root_pose = torch.tensor(default_root_pose, dtype=torch.float, device=self.device)
+ default_root_vel = torch.tensor(default_root_vel, dtype=torch.float, device=self.device)
+ self._data.default_root_pose = default_root_pose.repeat(self.num_instances, 1)
+ self._data.default_root_vel = default_root_vel.repeat(self.num_instances, 1)
+
+ """
+ Internal simulation callbacks.
+ """
+
+ def _invalidate_initialize_callback(self, event):
+ """Invalidates the scene elements."""
+ # call parent
+ super()._invalidate_initialize_callback(event)
+ # set all existing views to None to invalidate them
+ self._root_view = None \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py new file mode 100644 index 00000000000..f7b1e52441f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -0,0 +1,840 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import weakref +import logging + +import torch + +import omni.physics.tensors.impl.api as physx + +import isaaclab.utils.math as math_utils +from isaaclab.sim.utils.stage import get_current_stage_id +from isaaclab.utils.buffers import TimestampedBuffer +from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectData(BaseRigidObjectData): + """Data container for a rigid object. + + This class contains the data for a rigid object in the simulation. The data includes the state of + the root rigid body and the state of all the bodies in the object. The data is stored in the simulation + world frame unless otherwise specified. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + __backend_name__: str = "physx" + """The name of the backend for the rigid object data.""" + + def __init__(self, root_view: physx.RigidBodyView, device: str): + """Initializes the rigid object data. + + Args: + root_view: The root rigid body view. + device: The device used for processing. + """ + super().__init__(root_view, device) + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_view: physx.RigidBodyView = weakref.proxy(root_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + self._is_primed = False + + # Obtain global physics sim view + stage_id = get_current_stage_id() + physics_sim_view = physx.create_simulation_view("torch", stage_id) + physics_sim_view.set_subspace_roots("/") + gravity = physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_view.count, 1) + self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + + @property + def is_primed(self) -> bool: + """Whether the rigid object data is fully instantiated and ready to use.""" + return self._is_primed + + @is_primed.setter + def is_primed(self, value: bool) -> None: + """Set whether the rigid object data is fully instantiated and ready to use. + + .. note:: Once this quantity is set to True, it cannot be changed. + + Args: + value: The primed state. + + Raises: + ValueError: If the rigid object data is already primed. 
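The ``is_primed`` flag introduced above is a one-way latch: the asset sets it once initialization is complete, after which the default-state setters refuse further writes. A short sketch of the guard (``data`` names a freshly constructed ``RigidObjectData``; the single-instance shape is illustrative):

.. code-block:: python

    data.default_root_pose = torch.tensor([[0.0, 0.0, 0.5, 1.0, 0.0, 0.0, 0.0]])  # allowed before priming
    data.is_primed = True
    # any later assignment to data.default_root_pose (or to data.is_primed) raises ValueError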
+ """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._is_primed = value + + def update(self, dt: float) -> None: + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + """ + Names. + """ + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + """ + Defaults. + """ + + @property + def default_root_pose(self) -> torch.Tensor: + """Default root pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, 7). + + The position and quaternion are of the rigid body's actor frame. + """ + return self._default_root_pose + + @default_root_pose.setter + def default_root_pose(self, value: torch.Tensor) -> None: + """Set the default root pose. + + Args: + value: The default root pose. Shape is (num_instances, 7). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_pose = value + + + @property + def default_root_vel(self) -> torch.Tensor: + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). + + The linear and angular velocities are of the rigid body's center of mass frame. + """ + return self._default_root_vel + + @default_root_vel.setter + def default_root_vel(self, value: torch.Tensor) -> None: + """Set the default root velocity. + + Args: + value: The default root velocity. Shape is (num_instances, 6). + + Raises: + ValueError: If the rigid object data is already primed. + """ + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_vel = value + + @property + def default_root_state(self) -> torch.Tensor: + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are + of the center of mass frame. + """ + logger.warning("Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + return torch.cat([self.default_root_pose, self.default_root_vel], dim=1) + + @default_root_state.setter + def default_root_state(self, value: torch.Tensor) -> None: + """Set the default root state. + + Args: + value: The default root state. Shape is (num_instances, 13). + + Raises: + ValueError: If the rigid object data is already primed. + """ + logger.warning("Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + if self._is_primed: + raise ValueError("The rigid object data is already primed.") + self._default_root_pose = value[:, :7] + self._default_root_vel = value[:, 7:] + + """ + Root state properties. + """ + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the actor frame of the root rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. 
+ """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_view.get_transforms().clone() + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._root_link_pose_w.data = pose + self._root_link_pose_w.timestamp = self._sim_timestamp + + return self._root_link_pose_w.data + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity + vel = self.root_com_vel_w.clone() + # adjust linear velocity to link from center of mass + vel[:, :3] += torch.linalg.cross( + vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + ) + # set the buffer data and timestamp + self._root_link_vel_w.data = vel + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w.data + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the center of mass frame of the root rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] + ) + # set the buffer data and timestamp + self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w.data + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_view.get_velocities() + self._root_com_vel_w.timestamp = self._sim_timestamp + + return self._root_com_vel_w.data + + @property + def root_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular + velocities are of the rigid body's center of mass frame. + """ + if self._root_state_w.timestamp < self._sim_timestamp: + self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w.data + + @property + def root_link_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the + world. The orientation is provided in (w, x, y, z) format. 
+ """ + if self._root_link_state_w.timestamp < self._sim_timestamp: + self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w.data + + @property + def root_com_state_w(self) -> torch.Tensor: + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame + relative to the world. Center of mass frame is the orientation principle axes of inertia. + """ + if self._root_com_state_w.timestamp < self._sim_timestamp: + self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w.data + + """ + Body state properties. + """ + + @property + def body_mass(self) -> torch.Tensor: + """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + return self._body_inertia + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + return self.root_link_pose_w.view(-1, 1, 7) + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + return self.root_link_vel_w.view(-1, 1, 6) + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + return self.root_com_pose_w.view(-1, 1, 7) + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + return self.root_com_vel_w.view(-1, 1, 6) + + @property + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular + velocities are of the rigid bodies' center of mass frame. + """ + return self.root_state_w.view(-1, 1, 13) + + @property + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + The orientation is provided in (w, x, y, z) format. 
+ """ + return self.root_link_state_w.view(-1, 1, 13) + + @property + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. The orientation is provided in (w, x, y, z) format. + """ + return self.root_com_state_w.view(-1, 1, 13) + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + self._body_com_acc_w.data = self._root_view.get_accelerations().unsqueeze(1) + self._body_com_acc_w.timestamp = self._sim_timestamp + + return self._body_com_acc_w.data + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_view.get_coms().to(self.device) + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_com_pose_b.data = pose.view(-1, 1, 7) + self._body_com_pose_b.timestamp = self._sim_timestamp + + return self._body_com_pose_b.data + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading of the base frame (in radians). Shape is (num_instances,). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) + return torch.atan2(forward_w[:, 1], forward_w[:, 0]) + + @property + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) + + @property + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) + + @property + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). 
+ + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) + + @property + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) + + """ + Sliced properties. + """ + + @property + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_pose_w[:, :3] + + @property + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + return self.root_link_pose_w[:, 3:7] + + @property + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_vel_w[:, 3:6] + + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, 3:6] + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). 
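The derived heading above is the yaw of the base x-axis expressed in the world frame. A condensed restatement of that computation (the function name is illustrative):

.. code-block:: python

    import torch
    import isaaclab.utils.math as math_utils

    def heading_from_quat(root_link_quat_w: torch.Tensor) -> torch.Tensor:
        forward_b = torch.tensor([1.0, 0.0, 0.0], device=root_link_quat_w.device).repeat(
            root_link_quat_w.shape[0], 1
        )
        forward_w = math_utils.quat_apply(root_link_quat_w, forward_b)
        return torch.atan2(forward_w[:, 1], forward_w[:, 0])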
+ + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., 3:6] + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, 1, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + return self.body_com_pose_b[..., 3:7] + + def _create_buffers(self) -> None: + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer() + self._root_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. 
world frame + self._root_com_pose_w = TimestampedBuffer() + self._root_com_vel_w = TimestampedBuffer() + self._body_com_acc_w = TimestampedBuffer() + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer() + self._root_link_state_w = TimestampedBuffer() + self._root_com_state_w = TimestampedBuffer() + + # -- Default state + self._default_root_pose = torch.zeros(self._root_view.count, 7, device=self.device) + self._default_root_vel = torch.zeros(self._root_view.count, 6, device=self.device) + + # -- Body properties + self._body_mass = self._root_view.get_masses().to(self.device).clone() + self._body_inertia = self._root_view.get_inertias().to(self.device).clone() + + """ + Backwards compatibility. (Deprecated properties) + """ + + @property + def root_pose_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" + logger.warning( + "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead." + ) + return self.root_link_pose_w + + @property + def root_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" + logger.warning( + "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead." + ) + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" + logger.warning( + "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead." + ) + return self.root_link_quat_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_vel_w` instead.""" + logger.warning( + "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead." + ) + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" + logger.warning( + "The `root_lin_vel_w` property will be deprecated in a future release. Please use" + " `root_com_lin_vel_w` instead." + ) + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" + logger.warning( + "The `root_ang_vel_w` property will be deprecated in a future release. Please use" + " `root_com_ang_vel_w` instead." + ) + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_lin_vel_b` instead.""" + logger.warning( + "The `root_lin_vel_b` property will be deprecated in a future release. Please use" + " `root_com_lin_vel_b` instead." + ) + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" + logger.warning( + "The `root_ang_vel_b` property will be deprecated in a future release. Please use" + " `root_com_ang_vel_b` instead." + ) + return self.root_com_ang_vel_b + + @property + def body_pose_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" + logger.warning( + "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead." + ) + return self.body_link_pose_w + + @property + def body_pos_w(self) -> torch.Tensor: + """Deprecated property. 
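For downstream code, the deprecation warnings above amount to a simple renaming. A migration sketch based on the mappings emitted in this file (``data`` names the data container):

.. code-block:: python

    pose = data.root_link_pose_w   # was: data.root_pose_w
    vel = data.root_com_vel_w      # was: data.root_vel_w
    acc = data.body_com_acc_w      # was: data.body_acc_w
    com_b = data.body_com_pos_b    # was: data.com_pos_b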
Please use :attr:`body_link_pos_w` instead.""" + logger.warning( + "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." + ) + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" + logger.warning( + "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead." + ) + return self.body_link_quat_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" + logger.warning( + "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." + ) + return self.body_com_vel_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" + logger.warning( + "The `body_lin_vel_w` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_w` instead." + ) + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" + logger.warning( + "The `body_ang_vel_w` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_w` instead." + ) + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" + logger.warning( + "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." + ) + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" + logger.warning( + "The `body_lin_acc_w` property will be deprecated in a future release. Please use" + " `body_com_lin_acc_w` instead." + ) + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" + logger.warning( + "The `body_ang_acc_w` property will be deprecated in a future release. Please use" + " `body_com_ang_acc_w` instead." + ) + return self.body_com_ang_acc_w + + @property + def com_pos_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" + logger.warning( + "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." + ) + return self.body_com_pos_b + + @property + def com_quat_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" + logger.warning( + "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." + ) + return self.body_com_quat_b + + """ + Removed - Default values are no longer stored. + """ + + @property + def default_mass(self) -> torch.Tensor: + """Removed: Default mass is no longer stored.""" + raise RuntimeError( + "The property 'default_mass' has been removed. Default values are no longer stored. " + "Please use 'body_mass' to get the current mass values from the simulation." + ) + + @property + def default_inertia(self) -> torch.Tensor: + """Removed: Default inertia is no longer stored.""" + raise RuntimeError( + "The property 'default_inertia' has been removed. Default values are no longer stored. " + "Please use 'body_inertia' to get the current inertia values from the simulation." 
+ ) \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py new file mode 100644 index 00000000000..d1a94f1eac7 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py @@ -0,0 +1,657 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import weakref + +import torch + +import omni.physics.tensors.impl.api as physx + +import isaaclab.utils.math as math_utils +from isaaclab.sim.utils.stage import get_current_stage_id +from isaaclab.utils.buffers import TimestampedBuffer + + +class RigidObjectData: + """Data container for a rigid object. + + This class contains the data for a rigid object in the simulation. The data includes the state of + the root rigid body and the state of all the bodies in the object. The data is stored in the simulation + world frame unless otherwise specified. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + def __init__(self, root_physx_view: physx.RigidBodyView, device: str): + """Initializes the rigid object data. + + Args: + root_physx_view: The root rigid body view. + device: The device used for processing. + """ + # Set the parameters + self.device = device + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. + self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) + + # Set initial time stamp + self._sim_timestamp = 0.0 + + # Obtain global physics sim view + stage_id = get_current_stage_id() + physics_sim_view = physx.create_simulation_view("torch", stage_id) + physics_sim_view.set_subspace_roots("/") + gravity = physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) + self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._root_link_pose_w = TimestampedBuffer() + self._root_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. 
world frame + self._root_com_pose_w = TimestampedBuffer() + self._root_com_vel_w = TimestampedBuffer() + self._body_com_acc_w = TimestampedBuffer() + # -- combined state (these are cached as they concatenate) + self._root_state_w = TimestampedBuffer() + self._root_link_state_w = TimestampedBuffer() + self._root_com_state_w = TimestampedBuffer() + + def update(self, dt: float): + """Updates the data for the rigid object. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + ## + # Names. + ## + + body_names: list[str] = None + """Body names in the order parsed by the simulation view.""" + + ## + # Defaults. + ## + + default_root_state: torch.Tensor = None + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are + of the center of mass frame. + """ + + default_mass: torch.Tensor = None + """Default mass read from the simulation. Shape is (num_instances, 1).""" + + default_inertia: torch.Tensor = None + """Default inertia tensor read from the simulation. Shape is (num_instances, 9). + + The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. + The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. + However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. + + This quantity is parsed from the USD schema at the time of initialization. + """ + + ## + # Root state properties. + ## + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the actor frame of the root rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + if self._root_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_physx_view.get_transforms().clone() + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._root_link_pose_w.data = pose + self._root_link_pose_w.timestamp = self._sim_timestamp + + return self._root_link_pose_w.data + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. + """ + if self._root_link_vel_w.timestamp < self._sim_timestamp: + # read the CoM velocity + vel = self.root_com_vel_w.clone() + # adjust linear velocity to link from center of mass + vel[:, :3] += torch.linalg.cross( + vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + ) + # set the buffer data and timestamp + self._root_link_vel_w.data = vel + self._root_link_vel_w.timestamp = self._sim_timestamp + + return self._root_link_vel_w.data + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). + + This quantity is the pose of the center of mass frame of the root rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. 
+ """ + if self._root_com_pose_w.timestamp < self._sim_timestamp: + # apply local transform to center of mass frame + pos, quat = math_utils.combine_frame_transforms( + self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] + ) + # set the buffer data and timestamp + self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._root_com_pose_w.timestamp = self._sim_timestamp + + return self._root_com_pose_w.data + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + if self._root_com_vel_w.timestamp < self._sim_timestamp: + self._root_com_vel_w.data = self._root_physx_view.get_velocities() + self._root_com_vel_w.timestamp = self._sim_timestamp + + return self._root_com_vel_w.data + + @property + def root_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular + velocities are of the rigid body's center of mass frame. + """ + if self._root_state_w.timestamp < self._sim_timestamp: + self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) + self._root_state_w.timestamp = self._sim_timestamp + + return self._root_state_w.data + + @property + def root_link_state_w(self) -> torch.Tensor: + """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the + world. The orientation is provided in (w, x, y, z) format. + """ + if self._root_link_state_w.timestamp < self._sim_timestamp: + self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) + self._root_link_state_w.timestamp = self._sim_timestamp + + return self._root_link_state_w.data + + @property + def root_com_state_w(self) -> torch.Tensor: + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame + relative to the world. Center of mass frame is the orientation principle axes of inertia. + """ + if self._root_com_state_w.timestamp < self._sim_timestamp: + self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) + self._root_com_state_w.timestamp = self._sim_timestamp + + return self._root_com_state_w.data + + ## + # Body state properties. + ## + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the actor frame of the rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + return self.root_link_pose_w.view(-1, 1, 7) + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the actor frame of the root + rigid body relative to the world. 
+ """ + return self.root_link_vel_w.view(-1, 1, 6) + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + return self.root_com_pose_w.view(-1, 1, 7) + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + return self.root_com_vel_w.view(-1, 1, 6) + + @property + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular + velocities are of the rigid bodies' center of mass frame. + """ + return self.root_state_w.view(-1, 1, 13) + + @property + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + The orientation is provided in (w, x, y, z) format. + """ + return self.root_link_state_w.view(-1, 1, 13) + + @property + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. The orientation is provided in (w, x, y, z) format. + """ + return self.root_com_state_w.view(-1, 1, 13) + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + self._body_com_acc_w.data = self._root_physx_view.get_accelerations().unsqueeze(1) + self._body_com_acc_w.timestamp = self._sim_timestamp + + return self._body_com_acc_w.data + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (w, x, y, z) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._root_physx_view.get_coms().to(self.device) + pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + # set the buffer data and timestamp + self._body_com_pose_b.data = pose.view(-1, 1, 7) + self._body_com_pose_b.timestamp = self._sim_timestamp + + return self._body_com_pose_b.data + + ## + # Derived Properties. + ## + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Projection of the gravity direction on base frame. 
Shape is (num_instances, 3).""" + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading of the base frame (in radians). Shape is (num_instances,). + + Note: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) + return torch.atan2(forward_w[:, 1], forward_w[:, 0]) + + @property + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) + + @property + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) + + @property + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) + + @property + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) + + ## + # Sliced properties. + ## + + @property + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_pose_w[:, :3] + + @property + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body. + """ + return self.root_link_pose_w[:, 3:7] + + @property + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's actor frame relative to the world. + """ + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. + """ + return self.root_link_vel_w[:, 3:6] + + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root center of mass position in simulation world frame. Shape is (num_instances, 3). + + This quantity is the position of the actor frame of the root rigid body relative to the world. 
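+
+        More precisely, this is the world-frame position of the body's center of mass, i.e. the link (actor) frame
+        position offset by the center-of-mass offset rotated into the world frame. A rough sketch of the relation
+        (``data`` is a hypothetical, already-initialized data instance):
+
+        .. code-block:: python
+
+            offset_w = math_utils.quat_apply(data.root_link_quat_w, data.body_com_pos_b[:, 0])
+            torch.testing.assert_close(data.root_com_pos_w, data.root_link_pos_w + offset_w)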
+ """ + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). + + This quantity is the orientation of the actor frame of the root rigid body relative to the world. + """ + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. + """ + return self.root_com_vel_w[:, 3:6] + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., 3:6] + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). 
+ + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular acceleration of the rigid bodies' center of mass frame. + """ + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, 1, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + return self.body_com_pose_b[..., 3:7] + + ## + # Properties for backwards compatibility. + ## + + @property + def root_pose_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pose_w`.""" + return self.root_link_pose_w + + @property + def root_pos_w(self) -> torch.Tensor: + """Same as :attr:`root_link_pos_w`.""" + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Same as :attr:`root_link_quat_w`.""" + return self.root_link_quat_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_vel_w`.""" + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_w`.""" + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_w`.""" + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_lin_vel_b`.""" + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Same as :attr:`root_com_ang_vel_b`.""" + return self.root_com_ang_vel_b + + @property + def body_pose_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pose_w`.""" + return self.body_link_pose_w + + @property + def body_pos_w(self) -> torch.Tensor: + """Same as :attr:`body_link_pos_w`.""" + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Same as :attr:`body_link_quat_w`.""" + return self.body_link_quat_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_vel_w`.""" + return self.body_com_vel_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_vel_w`.""" + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_vel_w`.""" + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_acc_w`.""" + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_lin_acc_w`.""" + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Same as :attr:`body_com_ang_acc_w`.""" + return self.body_com_ang_acc_w + + @property + def com_pos_b(self) -> torch.Tensor: + """Same as :attr:`body_com_pos_b`.""" + return self.body_com_pos_b + + @property + def com_quat_b(self) -> torch.Tensor: + """Same as 
:attr:`body_com_quat_b`.""" + return self.body_com_quat_b diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py new file mode 100644 index 00000000000..6d0ec98f431 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py @@ -0,0 +1,582 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.utils.wrench_composer import WrenchComposer + +from ..asset_base import AssetBase +from .rigid_object_data import RigidObjectData + +if TYPE_CHECKING: + from .rigid_object_cfg import RigidObjectCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObject(AssetBase): + """A rigid object asset class. + + Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects + such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. + + For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid body. On playing the + simulation, the physics engine will automatically register the rigid body and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + .. note:: + + For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view + class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), + we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes + in Isaac Sim perform additional USD-related operations which are slow and also not required. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCfg + """Configuration instance for the rigid object.""" + + def __init__(self, cfg: RigidObjectCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + + """ + Properties + """ + + @property + def data(self) -> RigidObjectData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_physx_view.count + + @property + def num_bodies(self) -> int: + """Number of bodies in the asset. + + This is always 1 since each object is a single rigid body. + """ + return 1 + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object.""" + prim_paths = self.root_physx_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Rigid body view for the asset (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. 
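+
+        For instance, the view returns quaternions in the (x, y, z, w) order used by PhysX, whereas the asset data
+        buffers use (w, x, y, z). A minimal sketch of reading poses directly from the view (``obj`` is a
+        hypothetical, already-initialized rigid object):
+
+        .. code-block:: python
+
+            # shape: (num_instances, 7) with quaternion in xyzw order
+            poses = obj.root_physx_view.get_transforms()
+            quat_wxyz = math_utils.convert_quat(poses[:, 3:7], to="wxyz")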
+ """ + return self._root_physx_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # resolve all indices + if env_ids is None: + env_ids = slice(None) + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self): + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_BODY_INDICES_WP, + env_ids=self._ALL_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), + torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), + position_data=None, + indices=self._ALL_INDICES, + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float): + self._data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the rigid body based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. 
+ """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + """ + Operations - Write to simulation. + """ + + def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link state over selected environment indices into the simulation. + + The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + root_state: Root state in simulation frame. Shape is (len(env_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) + self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) + + def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) + + def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self._data.root_link_pose_w[env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. 
+ if self._data._root_link_state_w.data is not None: + self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] + if self._data._root_state_w.data is not None: + self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] + if self._data._root_com_state_w.data is not None: + expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( + self._data.root_link_pose_w[env_ids, :3], + self._data.root_link_pose_w[env_ids, 3:7], + self.data.body_com_pos_b[env_ids, 0, :], + self.data.body_com_quat_b[env_ids, 0, :], + ) + self._data.root_com_state_w[env_ids, :3] = expected_com_pos + self._data.root_com_state_w[env_ids, 3:7] = expected_com_quat + # convert root quaternion from wxyz to xyzw + root_poses_xyzw = self._data.root_link_pose_w.clone() + root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") + # set into simulation + self.root_physx_view.set_transforms(root_poses_xyzw, indices=physx_env_ids) + + def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass pose over selected environment indices into the simulation. + + The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + The orientation is the orientation of the principle axes of inertia. + + Args: + root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + local_env_ids = slice(env_ids) + else: + local_env_ids = env_ids + + # set into internal buffers + self._data.root_com_pose_w[local_env_ids] = root_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._root_com_state_w.data is not None: + self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids] + + # get CoM pose in link frame + com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] + com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :] + # transform input CoM pose to link frame + root_link_pos, root_link_quat = math_utils.combine_frame_transforms( + root_pose[..., :3], + root_pose[..., 3:7], + math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), + math_utils.quat_inv(com_quat_b), + ) + root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1) + + # write transformed pose in link frame to sim + self.write_root_link_pose_to_sim(root_link_pose, env_ids=env_ids) + + def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's center of mass rather than the roots frame. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids) + + def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root center of mass velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. 
+ NOTE: This sets the velocity of the root's center of mass rather than the roots frame. + + Args: + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + physx_env_ids = env_ids + if env_ids is None: + env_ids = slice(None) + physx_env_ids = self._ALL_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self._data.root_com_vel_w[env_ids] = root_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._root_com_state_w.data is not None: + self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] + if self._data._root_state_w.data is not None: + self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] + if self._data._root_link_state_w.data is not None: + self._data.root_link_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] + # make the acceleration zero to prevent reporting old values + self._data.body_com_acc_w[env_ids] = 0.0 + # set into simulation + self.root_physx_view.set_velocities(self._data.root_com_vel_w, indices=physx_env_ids) + + def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): + """Set the root link velocity over selected environment indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the root's frame rather than the roots center of mass. + + Args: + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + local_env_ids = slice(env_ids) + else: + local_env_ids = env_ids + + # set into internal buffers + self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._root_link_state_w.data is not None: + self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] + + # get CoM pose in link frame + quat = self.data.root_link_quat_w[local_env_ids] + com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] + # transform input velocity to center of mass frame + root_com_velocity = root_velocity.clone() + root_com_velocity[:, :3] += torch.linalg.cross( + root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 + ) + + # write transformed velocity in CoM frame to sim + self.write_root_com_velocity_to_sim(root_com_velocity, env_ids=env_ids) + + """ + Operations - Setters. + """ + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ): + """Set external force and torque to apply on the asset's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. 
Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + Defaults to None. + body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the bodies. + """ + logger.warning( + "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" + " use 'permanent_wrench_composer.set_forces_and_torques' instead." + ) + if forces is None and torques is None: + logger.warning("No forces or torques provided. No permanent external wrench will be applied.") + + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_INDICES_WP + elif not isinstance(env_ids, torch.Tensor): + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + # -- body_ids + body_ids = self._ALL_BODY_INDICES_WP + + # Write to wrench composer + self._permanent_wrench_composer.set_forces_and_torques( + forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, + torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, + positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, + body_ids=body_ids, + env_ids=env_ids, + is_global=is_global, + ) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." + " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." 
+ " Please ensure that there is only one rigid body in the prim path tree." + ) + + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" + f" located at: '{articulation_prims}' under '{template_prim_path}'. Please disable the articulation" + " root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] + # -- object view + self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*")) + + # check if the rigid body was created + if self._root_physx_view._backend is None: + raise RuntimeError(f"Failed to create rigid body at: {self.cfg.prim_path}. Please check PhysX logs.") + + # log information about the rigid body + logger.info(f"Rigid body initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") + logger.info(f"Number of instances: {self.num_instances}") + logger.info(f"Number of bodies: {self.num_bodies}") + logger.info(f"Body names: {self.body_names}") + + # container for data access + self._data = RigidObjectData(self.root_physx_view, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) + self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) + self._ALL_BODY_INDICES_WP = wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 + ) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.body_names = self.body_names + self._data.default_mass = self.root_physx_view.get_masses().clone() + self._data.default_inertia = self.root_physx_view.get_inertias().clone() + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- root state + # note: we cast to tuple to avoid torch/numpy type mismatch. + default_root_state = ( + tuple(self.cfg.init_state.pos) + + tuple(self.cfg.init_state.rot) + + tuple(self.cfg.init_state.lin_vel) + + tuple(self.cfg.init_state.ang_vel) + ) + default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) + self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) + + """ + Internal simulation callbacks. 
+ """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_physx_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py new file mode 100644 index 00000000000..66e5c5aa594 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid object collection.""" + +from .rigid_object_collection import RigidObjectCollection +from .rigid_object_collection_data import RigidObjectCollectionData + +__all__ = [ + "RigidObjectCollection", + "RigidObjectCollectionData", +] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py new file mode 100644 index 00000000000..908ca382f23 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -0,0 +1,845 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.utils.wrench_composer import WrenchComposer +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection import BaseRigidObjectCollection + +from .rigid_object_collection_data import RigidObjectCollectionData + +if TYPE_CHECKING: + from isaaclab.assets.rigid_object_collection.rigid_object_collection_cfg import RigidObjectCollectionCfg + +# import logger +logger = logging.getLogger(__name__) + +class RigidObjectCollection(BaseRigidObjectCollection): + """A rigid object collection class. + + This class represents a collection of rigid objects in the simulation, where the state of the + rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. + + For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the + simulation, the physics engine will automatically register the rigid bodies and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + Rigid objects in the collection are uniquely identified via the key of the dictionary + :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the + :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. 
+ This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by + the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid + object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary + could contain the same rigid object multiple times, leading to ambiguity. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCollectionCfg + """Configuration instance for the rigid object.""" + + __backend_name__: str = "physx" + """The name of the backend for the rigid object.""" + + def __init__(self, cfg: RigidObjectCollectionCfg): + """Initialize the rigid object. + + Args: + cfg: A configuration instance. + """ + super().__init__(cfg) + # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. + # check that the config is valid + cfg.validate() + # store inputs + self.cfg = cfg.copy() + # flag for whether the asset is initialized + self._is_initialized = False + self._prim_paths = [] + # spawn the rigid objects + for rigid_body_cfg in self.cfg.rigid_objects.values(): + # check if the rigid object path is valid + # note: currently the spawner does not work if there is a regex pattern in the leaf + # For example, if the prim path is "/World/Object_[1,2]" since the spawner will not + # know which prim to spawn. This is a limitation of the spawner and not the asset. + asset_path = rigid_body_cfg.prim_path.split("/")[-1] + asset_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", asset_path) is None + # spawn the asset + if rigid_body_cfg.spawn is not None and not asset_path_is_regex: + rigid_body_cfg.spawn.func( + rigid_body_cfg.prim_path, + rigid_body_cfg.spawn, + translation=rigid_body_cfg.init_state.pos, + orientation=rigid_body_cfg.init_state.rot, + ) + # check that spawn was successful + matching_prims = sim_utils.find_matching_prims(rigid_body_cfg.prim_path) + if len(matching_prims) == 0: + raise RuntimeError(f"Could not find prim with path {rigid_body_cfg.prim_path}.") + self._prim_paths.append(rigid_body_cfg.prim_path) + # stores object names + self._body_names_list = [] + + # register various callback functions + self._register_callbacks() + self._debug_vis_handle = None + + """ + Properties + """ + + @property + def data(self) -> RigidObjectCollectionData: + return self._data + + @property + def num_instances(self) -> int: + return self.root_view.count // self.num_bodies + + @property + def num_bodies(self) -> int: + """Number of bodies in the rigid object collection.""" + return len(self.body_names) + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies in the rigid object collection.""" + return self._body_names_list + + @property + def root_view(self): + """Root view for the rigid object collection. + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_view + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer for the rigid object collection.""" + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer for the rigid object collection.""" + return self._permanent_wrench_composer + + """ + Operations. 
+ """ + + def reset(self, env_ids: Sequence[int] | None = None, object_ids: slice | torch.Tensor | None = None) -> None: + """Resets all internal buffers of selected environments and objects. + + Args: + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + # resolve all indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self) -> None: + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_OBJ_INDICES_WP, + env_ids=self._ALL_ENV_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_force_as_torch), + torque_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_torque_as_torch), + position_data=None, + indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_force_as_torch), + torque_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_torque_as_torch), + position_data=None, + indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float) -> None: + """Updates the simulation data. + + Args: + dt: The time step size in seconds. + """ + self.data.update(dt) + + """ + Operations - Finders. + """ + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str], list[int]]: + """Find bodies in the rigid body collection based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body mask, names and indices. + """ + obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.object_names, preserve_order) + return torch.tensor(obj_ids, device=self.device), obj_names + + """ + Operations - Write to simulation. 
+ """ + + def write_body_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the bodies state over selected environment indices into the simulation. + + The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. Shape is (len(env_ids), len(body_ids), 13). + + Args: + body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + self.write_body_link_pose_to_sim(body_states[..., :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim(body_states[..., 7:], env_ids=env_ids, body_ids=body_ids) + + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body center of mass state over selected environment and body indices into the simulation. + + The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + self.write_body_com_pose_to_sim(body_states[..., :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_com_velocity_to_sim(body_states[..., 7:], env_ids=env_ids, body_ids=body_ids) + + def write_body_link_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body link state over selected environment and body indices into the simulation. + + The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + self.write_body_link_pose_to_sim(body_states[..., :7], env_ids=env_ids, body_ids=body_ids) + self.write_body_link_velocity_to_sim(body_states[..., 7:], env_ids=env_ids, body_ids=body_ids) + + def write_body_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body poses over selected environment and body indices into the simulation. + + The body pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + body_poses: Body poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. 
+ """ + self.write_body_link_pose_to_sim(body_poses, env_ids=env_ids, body_ids=body_ids) + + def write_body_link_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body link pose over selected environment and body indices into the simulation. + + The body link pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + + Args: + body_poses: Body link poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- body_ids + if body_ids is None: + body_ids = self._ALL_OBJ_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self.data.body_link_pose_w[env_ids[:, None], body_ids] = body_poses.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._body_link_state_w.data is not None: + self.data.body_link_state_w[env_ids[:, None], body_ids, :7] = body_poses.clone() + if self.data._body_state_w.data is not None: + self.data.body_state_w[env_ids[:, None], body_ids, :7] = body_poses.clone() + if self.data._body_com_state_w.data is not None: + # get CoM pose in link frame + com_pos_b = self.data.body_com_pos_b[env_ids[:, None], body_ids] + com_quat_b = self.data.body_com_quat_b[env_ids[:, None], body_ids] + com_pos, com_quat = math_utils.combine_frame_transforms( + body_poses[..., :3], + body_poses[..., 3:7], + com_pos_b, + com_quat_b, + ) + self.data.body_com_state_w[env_ids[:, None], body_ids, :3] = com_pos + self.data.body_com_state_w[env_ids[:, None], body_ids, 3:7] = com_quat + + # convert the quaternion from wxyz to xyzw + poses_xyzw = self.data.body_link_pose_w.clone() + poses_xyzw[..., 3:] = math_utils.convert_quat(poses_xyzw[..., 3:], to="xyzw") + + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids) + self.root_view.set_transforms(self.reshape_data_to_view(poses_xyzw), indices=view_ids) + + def write_body_com_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body center of mass pose over selected environment and body indices into the simulation. + + The body center of mass pose comprises of the cartesian position and quaternion orientation in (x, y, z, w). + The orientation is the orientation of the principle axes of inertia. + + Args: + body_poses: Body center of mass poses in simulation frame. Shape is (len(env_ids), len(body_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- body_ids + if body_ids is None: + body_ids = self._ALL_OBJ_INDICES + + # set into internal buffers + self.data.body_com_pose_w[env_ids[:, None], body_ids] = body_poses.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. 
+ if self.data._body_com_state_w.data is not None: + self.data.body_com_state_w[env_ids[:, None], body_ids, :7] = body_poses.clone() + + # get CoM pose in link frame + com_pos_b = self.data.body_com_pos_b[env_ids[:, None], body_ids] + com_quat_b = self.data.body_com_quat_b[env_ids[:, None], body_ids] + # transform input CoM pose to link frame + body_link_pos, body_link_quat = math_utils.combine_frame_transforms( + body_poses[..., :3], + body_poses[..., 3:7], + math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), + math_utils.quat_inv(com_quat_b), + ) + + # write transformed pose in link frame to sim + body_link_pose = torch.cat((body_link_pos, body_link_quat), dim=-1) + self.write_body_link_pose_to_sim(body_link_pose, env_ids=env_ids, body_ids=body_ids) + + def write_body_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the body's center of mass rather than the body's frame. + + Args: + body_velocities: Body velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + self.write_body_com_velocity_to_sim(body_velocities, env_ids=env_ids, body_ids=body_ids) + + def write_body_com_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body center of mass velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the body's center of mass rather than the body's frame. + + Args: + body_velocities: Body center of mass velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- body_ids + if body_ids is None: + body_ids = self._ALL_OBJ_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self.data.body_com_vel_w[env_ids[:, None], body_ids] = body_velocities.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. 
+ if self.data._body_com_state_w.data is not None: + self.data.body_com_state_w[env_ids[:, None], body_ids, 7:] = body_velocities.clone() + if self.data._body_state_w.data is not None: + self.data.body_state_w[env_ids[:, None], body_ids, 7:] = body_velocities.clone() + if self.data._body_link_state_w.data is not None: + self.data.body_link_state_w[env_ids[:, None], body_ids, 7:] = body_velocities.clone() + # make the acceleration zero to prevent reporting old values + self.data.body_com_acc_w[env_ids[:, None], body_ids] = 0.0 + + # set into simulation + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids) + self.root_view.set_velocities(self.reshape_data_to_view(self.data.body_com_vel_w), indices=view_ids) + + def write_body_link_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: slice | torch.Tensor | None = None, + ) -> None: + """Set the body link velocity over selected environment and body indices into the simulation. + + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + ..note:: This sets the velocity of the body's frame rather than the body's center of mass. + + Args: + body_velocities: Body link velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + body_ids: Body indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- body_ids + if body_ids is None: + body_ids = self._ALL_OBJ_INDICES + + # set into internal buffers + self.data.body_link_vel_w[env_ids[:, None], body_ids] = body_velocities.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self.data._body_link_state_w.data is not None: + self.data.body_link_state_w[env_ids[:, None], body_ids, 7:] = body_velocities.clone() + + # get CoM pose in link frame + quat = self.data.body_link_quat_w[env_ids[:, None], body_ids] + com_pos_b = self.data.body_com_pos_b[env_ids[:, None], body_ids] + # transform input velocity to center of mass frame + body_com_velocity = body_velocities.clone() + body_com_velocity[..., :3] += torch.linalg.cross( + body_com_velocity[..., 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 + ) + + # write center of mass velocity to sim + self.write_body_com_velocity_to_sim(body_com_velocity, env_ids=env_ids, body_ids=body_ids) + + """ + Operations - Setters. + """ + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set masses of all bodies. + + Args: + masses: Masses of all bodies. Shape is (num_instances, num_bodies). + body_ids: The body indices to set the masses for. Defaults to None (all bodies). + env_ids: The environment indices to set the masses for. Defaults to None (all environments). + """ + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set center of mass positions of all bodies. + + Args: + coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). + body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). + env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). 
+ """ + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | None = None, + env_ids: Sequence[int] | None = None, + ): + """Set inertias of all bodies. + + Args: + inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3). + body_ids: The body indices to set the inertias for. Defaults to None (all bodies). + env_ids: The environment indices to set the inertias for. Defaults to None (all environments). + """ + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = False, + ) -> None: + """Set external force and torque to apply on the rigid object collection's bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the + external wrench at (in the local link frame of the bodies). + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) + + .. caution:: + If the function is called consecutively with and with different values for ``is_global``, then the + all the external wrenches will be applied in the frame specified by the last call. + + .. code-block:: python + + # example of setting external wrench in the global frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[0], is_global=True) + # example of setting external wrench in the link frame + asset.set_external_force_and_torque(forces=torch.ones(1, 1, 3), env_ids=[1], is_global=False) + # Both environments will have the external wrenches applied in the link frame + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the bodies. + """ + logger.warning( + "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" + " use 'permanent_wrench_composer.set_forces_and_torques' instead." + ) + + if forces is None and torques is None: + logger.warning("No forces or torques provided. 
No permanent external wrench will be applied.")
+
+        # resolve all indices
+        # -- env_ids
+        if env_ids is None:
+            env_ids = self._ALL_ENV_INDICES_WP
+        elif not isinstance(env_ids, torch.Tensor):
+            env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device)
+        else:
+            env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32)
+        # -- body_ids
+        if body_ids is None:
+            body_ids = self._ALL_OBJ_INDICES_WP
+        elif isinstance(body_ids, slice):
+            body_ids = wp.from_torch(
+                torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32
+            )
+        elif not isinstance(body_ids, torch.Tensor):
+            body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device)
+        else:
+            body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32)
+
+        # Write to wrench composer
+        self._permanent_wrench_composer.set_forces_and_torques(
+            forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None,
+            torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None,
+            positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None,
+            body_ids=body_ids,
+            env_ids=env_ids,
+            is_global=is_global,
+        )
+
+    """
+    Helper functions.
+    """
+
+    def reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor:
+        """Reshapes and arranges the data coming from the :attr:`root_physx_view` to
+        (num_instances, num_objects, data_dim).
+
+        Args:
+            data: The data coming from the :attr:`root_physx_view`. Shape is (num_instances * num_objects, data_dim).
+
+        Returns:
+            The reshaped data. Shape is (num_instances, num_objects, data_dim).
+        """
+        return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1))
+
+    def reshape_data_to_view(self, data: torch.Tensor) -> torch.Tensor:
+        """Reshapes and arranges the data to be consistent with data from the :attr:`root_physx_view`.
+
+        Args:
+            data: The data to be reshaped. Shape is (num_instances, num_objects, data_dim).
+
+        Returns:
+            The reshaped data. Shape is (num_instances * num_objects, data_dim).
+        """
+        return torch.einsum("ijk -> jik", data).reshape(self.num_objects * self.num_instances, *data.shape[2:])
+
+    """
+    Internal helper.
+    """
+
+    def _initialize_impl(self):
+        # clear body names list to prevent double counting on re-initialization
+        self._body_names_list.clear()
+        # obtain global simulation view
+        self._physics_sim_view = SimulationManager.get_physics_sim_view()
+        root_prim_path_exprs = []
+        for name, rigid_body_cfg in self.cfg.rigid_object_cfg.items():
+            # obtain the first prim in the regex expression (all others are assumed to be a copy of this)
+            template_prim = sim_utils.find_first_matching_prim(rigid_body_cfg.prim_path)
+            if template_prim is None:
+                raise RuntimeError(f"Failed to find prim for expression: '{rigid_body_cfg.prim_path}'.")
+            template_prim_path = template_prim.GetPath().pathString
+
+            # find rigid root prims
+            root_prims = sim_utils.get_all_matching_child_prims(
+                template_prim_path,
+                predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI),
+                traverse_instance_prims=False,
+            )
+            if len(root_prims) == 0:
+                raise RuntimeError(
+                    f"Failed to find a rigid body when resolving '{rigid_body_cfg.prim_path}'."
+                    " Please ensure that the prim has 'USD RigidBodyAPI' applied."
+                )
+            if len(root_prims) > 1:
+                raise RuntimeError(
+                    f"Failed to find a single rigid body when resolving '{rigid_body_cfg.prim_path}'."
+                    f" Found multiple '{root_prims}' under '{template_prim_path}'."
+ " Please ensure that there is only one rigid body in the prim path tree." + ) + + # check that no rigid object has an articulation root API, which decreases simulation performance + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{rigid_body_cfg.prim_path}' in the rigid object" + f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." + " Please disable the articulation root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = rigid_body_cfg.prim_path + root_prim_path[len(template_prim_path) :] + root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) + + self._body_names_list.append(name) + + # -- object view + self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) + + # check if the rigid body was created + if self._root_physx_view._backend is None: + raise RuntimeError("Failed to create rigid body collection. Please check PhysX logs.") + + # log information about the rigid body + logger.info(f"Number of instances: {self.num_instances}") + logger.info(f"Number of distinct bodies: {self.num_bodies}") + logger.info(f"Body names: {self.body_names}") + + # container for data access + self._data = RigidObjectCollectionData(self.root_physx_view, self.num_bodies, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + + def _create_buffers(self): + # constants + self._ALL_ENV_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) + self._ALL_BODY_INDICES = torch.arange(self.num_bodies, dtype=torch.long, device=self.device) + self._ALL_ENV_INDICES_WP = wp.from_torch(self._ALL_ENV_INDICES.to(torch.int32), dtype=wp.int32) + self._ALL_BODY_INDICES_WP = wp.from_torch(self._ALL_BODY_INDICES.to(torch.int32), dtype=wp.int32) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.body_names = self.body_names + + def _process_cfg(self) -> None: + """Post processing of configuration parameters.""" + # default state + # -- body state + default_body_poses = [] + default_body_vels = [] + for rigid_object_cfg in self.cfg.rigid_objects.values(): + default_body_pose = ( + tuple(rigid_object_cfg.init_state.pos) + + tuple(rigid_object_cfg.init_state.rot) + ) + default_body_vel = ( + tuple(rigid_object_cfg.init_state.lin_vel) + + tuple(rigid_object_cfg.init_state.ang_vel) + ) + default_body_pose = ( + torch.tensor(default_body_pose, dtype=torch.float, device=self.device) + .repeat(self.num_instances, 1) + .unsqueeze(1) + ) + default_body_vel = ( + torch.tensor(default_body_vel, dtype=torch.float, device=self.device) + .repeat(self.num_instances, 1) + .unsqueeze(1) + ) + default_body_poses.append(default_body_pose) + default_body_vels.append(default_body_vel) + # concatenate the default state for each 
object + default_body_poses = torch.cat(default_body_poses, dim=1) + default_body_vels = torch.cat(default_body_vels, dim=1) + self.data.default_body_pose = default_body_poses + self.data.default_body_vel = default_body_vels + + def _env_body_ids_to_view_ids( + self, env_ids: torch.Tensor, body_ids: Sequence[int] | slice | torch.Tensor + ) -> torch.Tensor: + """Converts environment and body indices to indices consistent with data from :attr:`root_physx_view`. + + Args: + env_ids: Environment indices. + body_ids: Body indices. + + Returns: + The view indices. + """ + # the order is env_0/body_0, env_0/body_1, env_0/body_..., env_1/body_0, env_1/body_1, ... + # return a flat tensor of indices + if isinstance(body_ids, slice): + body_ids = self._ALL_BODY_INDICES + elif isinstance(body_ids, Sequence): + body_ids = torch.tensor(body_ids, device=self.device) + return (body_ids.unsqueeze(1) * self.num_instances + env_ids).flatten() + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event) -> None: + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_view = None + + def _on_prim_deletion(self, prim_path: str) -> None: + """Invalidates and deletes the callbacks when the prim is deleted. + + Args: + prim_path: The path to the prim that is being deleted. + + Note: + This function is called when the prim is deleted. + """ + if prim_path == "/": + self._clear_callbacks() + return + for prim_path_expr in self._prim_paths: + result = re.match( + pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path + ) + if result: + self._clear_callbacks() + return \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py new file mode 100644 index 00000000000..df3b6432fea --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -0,0 +1,910 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import weakref +import logging + +import torch + +import omni.physics.tensors.impl.api as physx + +import isaaclab.utils.math as math_utils +from isaaclab.sim.utils.stage import get_current_stage_id +from isaaclab.utils.buffers import TimestampedBuffer +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectCollectionData(BaseRigidObjectCollectionData): + """Data container for a rigid object collection. + + This class contains the data for a rigid object collection in the simulation. The data includes the state of + all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. + The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. 
+    - Center of mass frame: The frame of reference of the center of mass of the rigid body.
+
+    Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same.
+    This needs to be taken into account when interpreting the data.
+
+    The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful
+    when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer
+    is older than the current simulation timestamp. The timestamp is updated whenever the data is updated.
+    """
+
+    __backend_name__: str = "physx"
+    """The name of the backend for the rigid object collection data."""
+
+    def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str):
+        """Initializes the rigid object collection data.
+
+        Args:
+            root_view: The root rigid body collection view.
+            num_bodies: The number of bodies in the collection.
+            device: The device used for processing.
+        """
+        super().__init__(root_view, num_bodies, device)
+        self.num_bodies = num_bodies
+        # Set the root rigid body view
+        # note: this is stored as a weak reference to avoid circular references between the asset class
+        # and the data container. This is important to avoid memory leaks.
+        self._root_view: physx.RigidBodyView = weakref.proxy(root_view)
+        self.num_instances = self._root_view.count // self.num_bodies
+
+        # Set initial time stamp
+        self._sim_timestamp = 0.0
+        self._is_primed = False
+
+        # Obtain global physics sim view
+        stage_id = get_current_stage_id()
+        physics_sim_view = physx.create_simulation_view("torch", stage_id)
+        physics_sim_view.set_subspace_roots("/")
+        gravity = physics_sim_view.get_gravity()
+        # Convert to direction vector
+        gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device)
+        gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0)
+
+        # Initialize constants
+        self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, self.num_bodies, 1)
+        self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(
+            self.num_instances, self.num_bodies, 1
+        )
+
+    @property
+    def is_primed(self) -> bool:
+        """Whether the rigid object collection data is fully instantiated and ready to use."""
+        return self._is_primed
+
+    @is_primed.setter
+    def is_primed(self, value: bool) -> None:
+        """Set whether the rigid object collection data is fully instantiated and ready to use.
+
+        .. note:: Once this quantity is set to True, it cannot be changed.
+
+        Args:
+            value: The primed state.
+
+        Raises:
+            ValueError: If the rigid object collection data is already primed.
+        """
+        if self._is_primed:
+            raise ValueError("The rigid object collection data is already primed.")
+        self._is_primed = value
+
+    def update(self, dt: float) -> None:
+        """Updates the data for the rigid object collection.
+
+        Args:
+            dt: The time step for the update. This must be a positive value.
+        """
+        # update the simulation timestamp
+        self._sim_timestamp += dt
+
+    """
+    Names.
+    """
+
+    body_names: list[str] = None
+    """Body names in the order parsed by the simulation view."""
+
+    """
+    Defaults.
+    """
+
+    @property
+    def default_body_pose(self) -> torch.Tensor:
+        """Default body pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, num_bodies, 7).
+
+        The position and quaternion are of the rigid body's actor frame.
+        """
+        return self._default_body_pose
+
+    @default_body_pose.setter
+    def default_body_pose(self, value: torch.Tensor) -> None:
+        """Set the default body pose.
+
+        Args:
+            value: The default body pose. Shape is (num_instances, num_bodies, 7).
+
+        Raises:
+            ValueError: If the rigid object collection data is already primed.
+        """
+        if self._is_primed:
+            raise ValueError("The rigid object collection data is already primed.")
+        self._default_body_pose = value
+
+    @property
+    def default_body_vel(self) -> torch.Tensor:
+        """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 6).
+
+        The linear and angular velocities are of the rigid body's center of mass frame.
+        """
+        return self._default_body_vel
+
+    @default_body_vel.setter
+    def default_body_vel(self, value: torch.Tensor) -> None:
+        """Set the default body velocity.
+
+        Args:
+            value: The default body velocity. Shape is (num_instances, num_bodies, 6).
+
+        Raises:
+            ValueError: If the rigid object collection data is already primed.
+        """
+        if self._is_primed:
+            raise ValueError("The rigid object collection data is already primed.")
+        self._default_body_vel = value
+
+    @property
+    def default_body_state(self) -> torch.Tensor:
+        """Default body state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 13).
+
+        The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are
+        of the center of mass frame.
+        """
+        logger.warning(
+            "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future"
+            " version. Please use the default_body_pose and default_body_vel properties instead."
+        )
+        return torch.cat([self.default_body_pose, self.default_body_vel], dim=-1)
+
+    @default_body_state.setter
+    def default_body_state(self, value: torch.Tensor) -> None:
+        """Set the default body state.
+
+        Args:
+            value: The default body state. Shape is (num_instances, num_bodies, 13).
+
+        Raises:
+            ValueError: If the rigid object collection data is already primed.
+        """
+        logger.warning(
+            "Setting the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future"
+            " version. Please use the default_body_pose and default_body_vel properties instead."
+        )
+        if self._is_primed:
+            raise ValueError("The rigid object collection data is already primed.")
+        self._default_body_pose = value[:, :, :7]
+        self._default_body_vel = value[:, :, 7:]
+
+    """
+    Body state properties.
+    """
+
+    @property
+    def body_link_pose_w(self) -> torch.Tensor:
+        """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_bodies, 7).
+
+        This quantity is the pose of the actor frame of the rigid bodies relative to the world.
+        The orientation is provided in (w, x, y, z) format.
+        """
+        if self._body_link_pose_w.timestamp < self._sim_timestamp:
+            # read data from simulation
+            pose = self._reshape_view_to_data(self._root_view.get_transforms().clone())
+            pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz")
+            # set the buffer data and timestamp
+            self._body_link_pose_w.data = pose
+            self._body_link_pose_w.timestamp = self._sim_timestamp
+
+        return self._body_link_pose_w.data
+
+    @property
+    def body_link_vel_w(self) -> torch.Tensor:
+        """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, num_bodies, 6).
+
+        This quantity contains the linear and angular velocities of the actor frame of each
+        rigid body relative to the world.
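+
+        Example (illustrative sketch; ``data`` stands for an already-initialized instance of this class):
+
+        .. code-block:: python
+
+            vel = data.body_link_vel_w                    # (num_instances, num_bodies, 6)
+            lin_vel_w, ang_vel_w = vel[..., :3], vel[..., 3:]
+            # the same split is exposed through the sliced properties
+            assert torch.equal(lin_vel_w, data.body_link_lin_vel_w)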
+ """ + if self._body_link_vel_w.timestamp < self._sim_timestamp: + # read data from simulation + velocity = self.body_com_vel_w.clone() + # adjust linear velocity to link from center of mass + velocity[..., :3] += torch.linalg.cross( + velocity[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 + ) + # set the buffer data and timestamp + self._body_link_vel_w.data = velocity + self._body_link_vel_w.timestamp = self._sim_timestamp + + return self._body_link_vel_w.data + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_w.timestamp < self._sim_timestamp: + # adjust pose to center of mass + pos, quat = math_utils.combine_frame_transforms( + self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b + ) + # set the buffer data and timestamp + self._body_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._body_com_pose_w.timestamp = self._sim_timestamp + + return self._body_com_pose_w.data + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity contains the linear and angular velocities of the root rigid body's center of mass frame + relative to the world. + """ + if self._body_com_vel_w.timestamp < self._sim_timestamp: + self._body_com_vel_w.data = self._reshape_view_to_data(self._root_view.get_velocities()) + self._body_com_vel_w.timestamp = self._sim_timestamp + + return self._body_com_vel_w.data + + @property + def body_state_w(self) -> torch.Tensor: + """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular + velocities are of the rigid bodies' center of mass frame. + """ + if self._body_state_w.timestamp < self._sim_timestamp: + self._body_state_w.data = torch.cat((self.body_link_pose_w, self.body_com_vel_w), dim=-1) + self._body_state_w.timestamp = self._sim_timestamp + + return self._body_state_w.data + + @property + def body_link_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, 1, 13). + + The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_link_state_w.timestamp < self._sim_timestamp: + self._body_link_state_w.data = torch.cat((self.body_link_pose_w, self.body_link_vel_w), dim=-1) + self._body_link_state_w.timestamp = self._sim_timestamp + + return self._body_link_state_w.data + + @property + def body_com_state_w(self) -> torch.Tensor: + """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_bodies, 13). + + The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the + world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the + principle inertia. The orientation is provided in (x, y, z, w) format. 
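+
+        Example (illustrative sketch; ``data`` stands for an already-initialized instance of this class):
+
+        .. code-block:: python
+
+            state = data.body_com_state_w                 # (num_instances, num_bodies, 13)
+            pos_w = state[..., 0:3]                       # center of mass positions in world frame
+            quat_w = state[..., 3:7]                      # center of mass orientations in world frame
+            lin_vel_w, ang_vel_w = state[..., 7:10], state[..., 10:13]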
+ """ + if self._body_com_state_w.timestamp < self._sim_timestamp: + self._body_com_state_w.data = torch.cat((self.body_com_pose_w, self.body_com_vel_w), dim=-1) + self._body_com_state_w.timestamp = self._sim_timestamp + + return self._body_com_state_w.data + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. + Shape is (num_instances, 1, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. + """ + if self._body_com_acc_w.timestamp < self._sim_timestamp: + self._body_com_acc_w.data = self._reshape_view_to_data(self._root_view.get_accelerations()) + self._body_com_acc_w.timestamp = self._sim_timestamp + return self._body_com_acc_w.data + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. + Shape is (num_instances, 1, 7). + + This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. + The orientation is provided in (x, y, z, w) format. + """ + if self._body_com_pose_b.timestamp < self._sim_timestamp: + # obtain the coms + poses = self._root_view.get_coms().to(self.device) + poses[:, 3:7] = math_utils.convert_quat(poses[:, 3:7], to="wxyz") + # read data from simulation + self._body_com_pose_b.data = self._reshape_view_to_data(poses) + self._body_com_pose_b.timestamp = self._sim_timestamp + + return self._body_com_pose_b.data + + @property + def body_mass(self) -> torch.Tensor: + """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + return self._body_inertia + + """ + Derived Properties. + """ + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" + return math_utils.quat_apply_inverse(self.body_link_quat_w, self.GRAVITY_VEC_W) + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading of the base frame (in radians). Shape is (num_instances,). + + .. note:: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + forward_w = math_utils.quat_apply(self.body_link_quat_w, self.FORWARD_VEC_B) + return torch.atan2(forward_w[..., 1], forward_w[..., 0]) + + @property + def body_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in base frame. Shape is (num_instances, 3). + + This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_link_lin_vel_w) + + @property + def body_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_link_ang_vel_w) + + @property + def body_com_lin_vel_b(self) -> torch.Tensor: + """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). 
+ + This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_com_lin_vel_w) + + @property + def body_com_ang_vel_b(self) -> torch.Tensor: + """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + + This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the + rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_com_ang_vel_w) + + """ + Sliced properties. + """ + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the rigid bodies' actor frame relative to the world. + """ + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. + """ + return self.body_link_vel_w[..., 3:6] + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the position of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame. + + Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + """ + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the linear acceleration of the rigid bodies' center of mass frame. + """ + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + + This quantity is the angular acceleration of the rigid bodies' center of mass frame. 
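+
+        Example (illustrative sketch; ``data`` stands for an already-initialized instance of this class,
+        and the threshold is arbitrary):
+
+        .. code-block:: python
+
+            # flag environments in which any body's angular acceleration exceeds a chosen threshold
+            ang_acc_norm = torch.linalg.norm(data.body_com_ang_acc_w, dim=-1)   # (num_instances, num_bodies)
+            unstable_envs = (ang_acc_norm > 1000.0).any(dim=-1)                 # (num_instances,)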
+ """ + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Center of mass position of all of the bodies in their respective link frames. + Shape is (num_instances, 1, 3). + + This quantity is the center of mass location relative to its body'slink frame. + """ + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their + respective link frames. Shape is (num_instances, 1, 4). + + This quantity is the orientation of the principles axes of inertia relative to its body's link frame. + """ + return self.body_com_pose_b[..., 3:7] + + def _create_buffers(self) -> None: + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._body_link_pose_w = TimestampedBuffer() + self._body_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._body_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. world frame + self._body_com_pose_w = TimestampedBuffer() + self._body_com_vel_w = TimestampedBuffer() + self._body_com_acc_w = TimestampedBuffer() + # -- combined state(these are cached as they concatenate) + self._body_state_w = TimestampedBuffer() + self._body_link_state_w = TimestampedBuffer() + self._body_com_state_w = TimestampedBuffer() + + # -- Default state + self._default_body_pose = torch.zeros(self.num_instances, self.num_bodies, 7, device=self.device) + self._default_body_vel = torch.zeros(self.num_instances, self.num_bodies, 6, device=self.device) + + # -- Body properties + self._body_mass = self._root_view.get_masses().to(self.device).clone() + self._body_inertia = self._root_view.get_inertias().to(self.device).clone() + """ + Properties for backwards compatibility. + """ + + @property + def object_link_pose_w(self): + """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" + logger.warning( + "The `object_link_pose_w` property will be deprecated in a future release. Please use" + " `body_link_pose_w` instead." + ) + return self.body_link_pose_w + + @property + def object_link_vel_w(self): + """Deprecated property. Please use :attr:`body_link_vel_w` instead.""" + logger.warning( + "The `object_link_vel_w` property will be deprecated in a future release. Please use" + " `body_link_vel_w` instead." + ) + return self.body_link_vel_w + + @property + def object_com_pose_w(self): + """Deprecated property. Please use :attr:`body_com_pose_w` instead.""" + logger.warning( + "The `object_com_pose_w` property will be deprecated in a future release. Please use" + " `body_com_pose_w` instead." + ) + return self.body_com_pose_w + + @property + def object_com_vel_w(self): + """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" + logger.warning( + "The `object_com_vel_w` property will be deprecated in a future release. Please use" + " `body_com_vel_w` instead." + ) + return self.body_com_vel_w + + @property + def object_state_w(self): + """Deprecated property. Please use :attr:`body_state_w` instead.""" + logger.warning( + "The `object_state_w` property will be deprecated in a future release. Please use" + " `body_state_w` instead." + ) + return self.body_state_w + + @property + def object_link_state_w(self): + """Deprecated property. Please use :attr:`body_link_state_w` instead.""" + logger.warning( + "The `object_link_state_w` property will be deprecated in a future release. Please use" + " `body_link_state_w` instead." 
+ ) + return self.body_link_state_w + + @property + def object_com_state_w(self): + """Deprecated property. Please use :attr:`body_com_state_w` instead.""" + logger.warning( + "The `object_com_state_w` property will be deprecated in a future release. Please use" + " `body_com_state_w` instead." + ) + return self.body_com_state_w + + @property + def object_com_acc_w(self): + """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" + logger.warning( + "The `object_com_acc_w` property will be deprecated in a future release. Please use" + " `body_com_acc_w` instead." + ) + return self.body_com_acc_w + + @property + def object_com_pose_b(self): + """Deprecated property. Please use :attr:`body_com_pose_b` instead.""" + logger.warning( + "The `object_com_pose_b` property will be deprecated in a future release. Please use" + " `body_com_pose_b` instead." + ) + return self.body_com_pose_b + + @property + def object_link_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" + logger.warning( + "The `object_link_pos_w` property will be deprecated in a future release. Please use" + " `body_link_pos_w` instead." + ) + return self.body_link_pos_w + + @property + def object_link_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" + logger.warning( + "The `object_link_quat_w` property will be deprecated in a future release. Please use" + " `body_link_quat_w` instead." + ) + return self.body_link_quat_w + + @property + def object_link_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_lin_vel_w` instead.""" + logger.warning( + "The `object_link_lin_vel_w` property will be deprecated in a future release. Please use" + " `body_link_lin_vel_w` instead." + ) + return self.body_link_lin_vel_w + + @property + def object_link_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_ang_vel_w` instead.""" + logger.warning( + "The `object_link_ang_vel_w` property will be deprecated in a future release. Please use" + " `body_link_ang_vel_w` instead." + ) + return self.body_link_ang_vel_w + + @property + def object_com_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_pos_w` instead.""" + logger.warning( + "The `object_com_pos_w` property will be deprecated in a future release. Please use" + " `body_com_pos_w` instead." + ) + return self.body_com_pos_w + + @property + def object_com_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_quat_w` instead.""" + logger.warning( + "The `object_com_quat_w` property will be deprecated in a future release. Please use" + " `body_com_quat_w` instead." + ) + return self.body_com_quat_w + + @property + def object_com_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" + logger.warning( + "The `object_com_lin_vel_w` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_w` instead." + ) + return self.body_com_lin_vel_w + + @property + def object_com_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" + logger.warning( + "The `object_com_ang_vel_w` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_w` instead." + ) + return self.body_com_ang_vel_w + + @property + def object_com_lin_acc_w(self) -> torch.Tensor: + """Deprecated property. 
Please use :attr:`body_com_lin_acc_w` instead.""" + logger.warning( + "The `object_com_lin_acc_w` property will be deprecated in a future release. Please use" + " `body_com_lin_acc_w` instead." + ) + return self.body_com_lin_acc_w + + @property + def object_com_ang_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" + logger.warning( + "The `object_com_ang_acc_w` property will be deprecated in a future release. Please use" + " `body_com_ang_acc_w` instead." + ) + return self.body_com_ang_acc_w + + @property + def object_com_pos_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" + logger.warning( + "The `object_com_pos_b` property will be deprecated in a future release. Please use" + " `body_com_pos_b` instead." + ) + return self.body_com_pos_b + + @property + def object_com_quat_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" + logger.warning( + "The `object_com_quat_b` property will be deprecated in a future release. Please use" + " `body_com_quat_b` instead." + ) + return self.body_com_quat_b + + @property + def object_link_lin_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_lin_vel_b` instead.""" + logger.warning( + "The `object_link_lin_vel_b` property will be deprecated in a future release. Please use" + " `body_link_lin_vel_b` instead." + ) + return self.body_link_lin_vel_b + + @property + def object_link_ang_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_ang_vel_b` instead.""" + logger.warning( + "The `object_link_ang_vel_b` property will be deprecated in a future release. Please use" + " `body_link_ang_vel_b` instead." + ) + return self.body_link_ang_vel_b + + @property + def object_com_lin_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" + logger.warning( + "The `object_com_lin_vel_b` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_b` instead." + ) + return self.body_com_lin_vel_b + + @property + def object_com_ang_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" + logger.warning( + "The `object_com_ang_vel_b` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_b` instead." + ) + return self.body_com_ang_vel_b + + @property + def object_pose_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" + logger.warning( + "The `object_pose_w` property will be deprecated in a future release. Please use" + " `body_link_pose_w` instead." + ) + return self.body_link_pose_w + + @property + def object_pos_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" + logger.warning( + "The `object_pos_w` property will be deprecated in a future release. Please use" + " `body_link_pos_w` instead." + ) + return self.body_link_pos_w + + @property + def object_quat_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" + logger.warning( + "The `object_quat_w` property will be deprecated in a future release. Please use" + " `body_link_quat_w` instead." + ) + return self.body_link_quat_w + + @property + def object_vel_w(self) -> torch.Tensor: + """Deprecated property. 
Please use :attr:`body_com_vel_w` instead.""" + logger.warning( + "The `object_vel_w` property will be deprecated in a future release. Please use" + " `body_com_vel_w` instead." + ) + return self.body_com_vel_w + + @property + def object_lin_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" + logger.warning( + "The `object_lin_vel_w` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_w` instead." + ) + return self.body_com_lin_vel_w + + @property + def object_ang_vel_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" + logger.warning( + "The `object_ang_vel_w` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_w` instead." + ) + return self.body_com_ang_vel_w + + @property + def object_lin_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" + logger.warning( + "The `object_lin_vel_b` property will be deprecated in a future release. Please use" + " `body_com_lin_vel_b` instead." + ) + return self.body_com_lin_vel_b + + @property + def object_ang_vel_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" + logger.warning( + "The `object_ang_vel_b` property will be deprecated in a future release. Please use" + " `body_com_ang_vel_b` instead." + ) + return self.body_com_ang_vel_b + + @property + def object_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" + logger.warning( + "The `object_acc_w` property will be deprecated in a future release. Please use" + " `body_com_acc_w` instead." + ) + return self.body_com_acc_w + + @property + def object_lin_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" + logger.warning( + "The `object_lin_acc_w` property will be deprecated in a future release. Please use" + " `body_com_lin_acc_w` instead." + ) + return self.body_com_lin_acc_w + + @property + def object_ang_acc_w(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" + logger.warning( + "The `object_ang_acc_w` property will be deprecated in a future release. Please use" + " `body_com_ang_acc_w` instead." + ) + return self.body_com_ang_acc_w + + @property + def com_pos_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" + logger.warning( + "The `com_pos_b` property will be deprecated in a future release. Please use" + " `body_com_pos_b` instead." + ) + return self.body_com_pos_b + + @property + def com_quat_b(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" + logger.warning( + "The `com_quat_b` property will be deprecated in a future release. Please use" + " `body_com_quat_b` instead." + ) + return self.body_com_quat_b + + """ + Removed - Default values are no longer stored. + """ + + @property + def default_mass(self) -> torch.Tensor: + """Removed: Default mass is no longer stored.""" + raise RuntimeError( + "The property 'default_mass' has been removed. Default values are no longer stored. " + "Please use 'body_mass' to get the current mass values from the simulation." + ) + + @property + def default_inertia(self) -> torch.Tensor: + """Removed: Default inertia is no longer stored.""" + raise RuntimeError( + "The property 'default_inertia' has been removed. Default values are no longer stored. 
" + "Please use 'body_inertia' to get the current inertia values from the simulation." + ) + + """ + Helpers. + """ + + def _reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: + """Reshapes and arranges the data from the physics view to (num_instances, num_bodies, data_size). + + Args: + data: The data from the physics view. Shape is (num_instances * num_bodies, data_size). + + Returns: + The reshaped data. Shape is (num_bodies, num_instances, data_size). + """ + return torch.einsum("ijk -> jik", data.reshape(self.num_bodies, self.num_instances, -1)) \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py new file mode 100644 index 00000000000..5156ef729e4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py @@ -0,0 +1,515 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import weakref + +import torch + +import omni.physics.tensors.impl.api as physx + +import isaaclab.utils.math as math_utils +from isaaclab.sim.utils.stage import get_current_stage_id +from isaaclab.utils.buffers import TimestampedBuffer + + +class RigidObjectCollectionData: + """Data container for a rigid object collection. + + This class contains the data for a rigid object collection in the simulation. The data includes the state of + all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. + The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. + + For a rigid body, there are two frames of reference that are used: + + - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim + with the rigid body schema. + - Center of mass frame: The frame of reference of the center of mass of the rigid body. + + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. + This needs to be taken into account when interpreting the data. + + The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful + when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer + is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. + """ + + def __init__(self, root_physx_view: physx.RigidBodyView, num_objects: int, device: str): + """Initializes the data. + + Args: + root_physx_view: The root rigid body view. + num_objects: The number of objects in the collection. + device: The device used for processing. + """ + # Set the parameters + self.device = device + self.num_objects = num_objects + # Set the root rigid body view + # note: this is stored as a weak reference to avoid circular references between the asset class + # and the data container. This is important to avoid memory leaks. 
+ self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) + self.num_instances = self._root_physx_view.count // self.num_objects + + # Set initial time stamp + self._sim_timestamp = 0.0 + + # Obtain global physics sim view + stage_id = get_current_stage_id() + physics_sim_view = physx.create_simulation_view("torch", stage_id) + physics_sim_view.set_subspace_roots("/") + gravity = physics_sim_view.get_gravity() + # Convert to direction vector + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + + # Initialize constants + self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, self.num_objects, 1) + self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat( + self.num_instances, self.num_objects, 1 + ) + + # Initialize the lazy buffers. + # -- link frame w.r.t. world frame + self._object_link_pose_w = TimestampedBuffer() + self._object_link_vel_w = TimestampedBuffer() + # -- com frame w.r.t. link frame + self._object_com_pose_b = TimestampedBuffer() + # -- com frame w.r.t. world frame + self._object_com_pose_w = TimestampedBuffer() + self._object_com_vel_w = TimestampedBuffer() + self._object_com_acc_w = TimestampedBuffer() + # -- combined state(these are cached as they concatenate) + self._object_state_w = TimestampedBuffer() + self._object_link_state_w = TimestampedBuffer() + self._object_com_state_w = TimestampedBuffer() + + def update(self, dt: float): + """Updates the data for the rigid object collection. + + Args: + dt: The time step for the update. This must be a positive value. + """ + # update the simulation timestamp + self._sim_timestamp += dt + + ## + # Names. + ## + + object_names: list[str] = None + """Object names in the order parsed by the simulation view.""" + + ## + # Defaults. + ## + + default_object_state: torch.Tensor = None + """Default object state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. + Shape is (num_instances, num_objects, 13). + + The position and quaternion are of each object's rigid body's actor frame. Meanwhile, the linear and + angular velocities are of the center of mass frame. + """ + + default_mass: torch.Tensor = None + """Default object mass read from the simulation. Shape is (num_instances, num_objects, 1).""" + + default_inertia: torch.Tensor = None + """Default object inertia tensor read from the simulation. Shape is (num_instances, num_objects, 9). + + The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. + The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. + However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. + + This quantity is parsed from the USD schema at the time of initialization. + """ + + ## + # Root state properties. + ## + + @property + def object_link_pose_w(self): + """Object link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_objects, 7). + + The position and orientation are of the rigid body's actor frame. 
+ """ + if self._object_link_pose_w.timestamp < self._sim_timestamp: + # read data from simulation + pose = self._reshape_view_to_data(self._root_physx_view.get_transforms().clone()) + pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") + # set the buffer data and timestamp + self._object_link_pose_w.data = pose + self._object_link_pose_w.timestamp = self._sim_timestamp + + return self._object_link_pose_w.data + + @property + def object_link_vel_w(self): + """Object link velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_objects, 6). + + The linear and angular velocities are of the rigid body's actor frame. + """ + if self._object_link_vel_w.timestamp < self._sim_timestamp: + # read data from simulation + velocity = self.object_com_vel_w.clone() + # adjust linear velocity to link from center of mass + velocity[..., :3] += torch.linalg.cross( + velocity[..., 3:], math_utils.quat_apply(self.object_link_quat_w, -self.object_com_pos_b), dim=-1 + ) + # set the buffer data and timestamp + self._object_link_vel_w.data = velocity + self._object_link_vel_w.timestamp = self._sim_timestamp + + return self._object_link_vel_w.data + + @property + def object_com_pose_w(self): + """Object center of mass pose ``[pos, quat]`` in simulation world frame. + Shape is (num_instances, num_objects, 7). + + The position and orientation are of the rigid body's center of mass frame. + """ + if self._object_com_pose_w.timestamp < self._sim_timestamp: + # adjust pose to center of mass + pos, quat = math_utils.combine_frame_transforms( + self.object_link_pos_w, self.object_link_quat_w, self.object_com_pos_b, self.object_com_quat_b + ) + # set the buffer data and timestamp + self._object_com_pose_w.data = torch.cat((pos, quat), dim=-1) + self._object_com_pose_w.timestamp = self._sim_timestamp + + return self._object_com_pose_w.data + + @property + def object_com_vel_w(self): + """Object center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_objects, 6). + + The linear and angular velocities are of the rigid body's center of mass frame. + """ + if self._object_com_vel_w.timestamp < self._sim_timestamp: + self._object_com_vel_w.data = self._reshape_view_to_data(self._root_physx_view.get_velocities()) + self._object_com_vel_w.timestamp = self._sim_timestamp + + return self._object_com_vel_w.data + + @property + def object_state_w(self): + """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_objects, 13). + + The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular + velocities are of the rigid body's center of mass frame. + """ + if self._object_state_w.timestamp < self._sim_timestamp: + self._object_state_w.data = torch.cat((self.object_link_pose_w, self.object_com_vel_w), dim=-1) + self._object_state_w.timestamp = self._sim_timestamp + + return self._object_state_w.data + + @property + def object_link_state_w(self): + """Object center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_objects, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the + world. 
+ """ + if self._object_link_state_w.timestamp < self._sim_timestamp: + self._object_link_state_w.data = torch.cat((self.object_link_pose_w, self.object_link_vel_w), dim=-1) + self._object_link_state_w.timestamp = self._sim_timestamp + + return self._object_link_state_w.data + + @property + def object_com_state_w(self): + """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. + Shape is (num_instances, num_objects, 13). + + The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame + relative to the world. Center of mass frame has the orientation along the principle axes of inertia. + """ + if self._object_com_state_w.timestamp < self._sim_timestamp: + self._object_com_state_w.data = torch.cat((self.object_com_pose_w, self.object_com_vel_w), dim=-1) + self._object_com_state_w.timestamp = self._sim_timestamp + + return self._object_com_state_w.data + + @property + def object_com_acc_w(self): + """Acceleration of all objects. Shape is (num_instances, num_objects, 6). + + This quantity is the acceleration of the rigid bodies' center of mass frame. + """ + if self._object_com_acc_w.timestamp < self._sim_timestamp: + self._object_com_acc_w.data = self._reshape_view_to_data(self._root_physx_view.get_accelerations()) + self._object_com_acc_w.timestamp = self._sim_timestamp + return self._object_com_acc_w.data + + @property + def object_com_pose_b(self): + """Object center of mass pose ``[pos, quat]`` in their respective body's link frame. + Shape is (num_instances, num_objects, 7). + + The position and orientation are of the rigid body's center of mass frame. + The orientation is provided in (w, x, y, z) format. + """ + if self._object_com_pose_b.timestamp < self._sim_timestamp: + # obtain the coms + poses = self._root_physx_view.get_coms().to(self.device) + poses[:, 3:7] = math_utils.convert_quat(poses[:, 3:7], to="wxyz") + # read data from simulation + self._object_com_pose_b.data = self._reshape_view_to_data(poses) + self._object_com_pose_b.timestamp = self._sim_timestamp + + return self._object_com_pose_b.data + + ## + # Derived properties. + ## + + @property + def projected_gravity_b(self): + """Projection of the gravity direction on base frame. Shape is (num_instances, num_objects, 3).""" + return math_utils.quat_apply_inverse(self.object_link_quat_w, self.GRAVITY_VEC_W) + + @property + def heading_w(self): + """Yaw heading of the base frame (in radians). Shape is (num_instances, num_objects,). + + Note: + This quantity is computed by assuming that the forward-direction of the base + frame is along x-direction, i.e. :math:`(1, 0, 0)`. + """ + forward_w = math_utils.quat_apply(self.object_link_quat_w, self.FORWARD_VEC_B) + return torch.atan2(forward_w[..., 1], forward_w[..., 0]) + + @property + def object_link_lin_vel_b(self) -> torch.Tensor: + """Object link linear velocity in base frame. Shape is (num_instances, num_objects, 3). + + This quantity is the linear velocity of the actor frame of the root rigid body frame with + respect to the rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_lin_vel_w) + + @property + def object_link_ang_vel_b(self) -> torch.Tensor: + """Object link angular velocity in base world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the angular velocity of the actor frame of the root rigid body frame with + respect to the rigid body's actor frame. 
+ """ + return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_ang_vel_w) + + @property + def object_com_lin_vel_b(self) -> torch.Tensor: + """Object center of mass linear velocity in base frame. Shape is (num_instances, num_objects, 3). + + This quantity is the linear velocity of the center of mass frame of the root rigid body frame with + respect to the rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_lin_vel_w) + + @property + def object_com_ang_vel_b(self) -> torch.Tensor: + """Object center of mass angular velocity in base world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the angular velocity of the center of mass frame of the root rigid body frame with + respect to the rigid body's actor frame. + """ + return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_ang_vel_w) + + ## + # Sliced properties. + ## + + @property + def object_link_pos_w(self) -> torch.Tensor: + """Object link position in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the position of the actor frame of the rigid bodies. + """ + return self.object_link_pose_w[..., :3] + + @property + def object_link_quat_w(self) -> torch.Tensor: + """Object link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, num_objects, 4). + + This quantity is the orientation of the actor frame of the rigid bodies. + """ + return self.object_link_pose_w[..., 3:7] + + @property + def object_link_lin_vel_w(self) -> torch.Tensor: + """Object link linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the linear velocity of the rigid bodies' actor frame. + """ + return self.object_link_vel_w[..., :3] + + @property + def object_link_ang_vel_w(self) -> torch.Tensor: + """Object link angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the angular velocity of the rigid bodies' actor frame. + """ + return self.object_link_vel_w[..., 3:6] + + @property + def object_com_pos_w(self) -> torch.Tensor: + """Object center of mass position in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the position of the center of mass frame of the rigid bodies. + """ + return self.object_com_pose_w[..., :3] + + @property + def object_com_quat_w(self) -> torch.Tensor: + """Object center of mass orientation (w, x, y, z) in simulation world frame. + Shape is (num_instances, num_objects, 4). + + This quantity is the orientation of the center of mass frame of the rigid bodies. + """ + return self.object_com_pose_w[..., 3:7] + + @property + def object_com_lin_vel_w(self) -> torch.Tensor: + """Object center of mass linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the linear velocity of the rigid bodies' center of mass frame. + """ + return self.object_com_vel_w[..., :3] + + @property + def object_com_ang_vel_w(self) -> torch.Tensor: + """Object center of mass angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). + + This quantity is the angular velocity of the rigid bodies' center of mass frame. + """ + return self.object_com_vel_w[..., 3:6] + + @property + def object_com_lin_acc_w(self) -> torch.Tensor: + """Object center of mass linear acceleration in simulation world frame. + Shape is (num_instances, num_objects, 3). 
+
+        This quantity is the linear acceleration of the rigid bodies' center of mass frame.
+        """
+        return self.object_com_acc_w[..., :3]
+
+    @property
+    def object_com_ang_acc_w(self) -> torch.Tensor:
+        """Object center of mass angular acceleration in simulation world frame.
+        Shape is (num_instances, num_objects, 3).
+
+        This quantity is the angular acceleration of the rigid bodies' center of mass frame.
+        """
+        return self.object_com_acc_w[..., 3:6]
+
+    @property
+    def object_com_pos_b(self) -> torch.Tensor:
+        """Center of mass of all of the bodies in their respective body's link frame.
+        Shape is (num_instances, num_objects, 3).
+
+        This quantity is the center of mass location relative to its body link frame.
+        """
+        return self.object_com_pose_b[..., :3]
+
+    @property
+    def object_com_quat_b(self) -> torch.Tensor:
+        """Orientation (w, x, y, z) of the principal axes of inertia of all of the bodies in their respective
+        body's link frame. Shape is (num_instances, num_objects, 4).
+
+        This quantity is the orientation of the principal axes of inertia relative to its body link frame.
+        The orientation is provided in (w, x, y, z) format.
+        """
+        return self.object_com_pose_b[..., 3:7]
+
+    ##
+    # Properties for backwards compatibility.
+    ##
+
+    @property
+    def object_pose_w(self) -> torch.Tensor:
+        """Same as :attr:`object_link_pose_w`."""
+        return self.object_link_pose_w
+
+    @property
+    def object_pos_w(self) -> torch.Tensor:
+        """Same as :attr:`object_link_pos_w`."""
+        return self.object_link_pos_w
+
+    @property
+    def object_quat_w(self) -> torch.Tensor:
+        """Same as :attr:`object_link_quat_w`."""
+        return self.object_link_quat_w
+
+    @property
+    def object_vel_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_vel_w`."""
+        return self.object_com_vel_w
+
+    @property
+    def object_lin_vel_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_lin_vel_w`."""
+        return self.object_com_lin_vel_w
+
+    @property
+    def object_ang_vel_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_ang_vel_w`."""
+        return self.object_com_ang_vel_w
+
+    @property
+    def object_lin_vel_b(self) -> torch.Tensor:
+        """Same as :attr:`object_com_lin_vel_b`."""
+        return self.object_com_lin_vel_b
+
+    @property
+    def object_ang_vel_b(self) -> torch.Tensor:
+        """Same as :attr:`object_com_ang_vel_b`."""
+        return self.object_com_ang_vel_b
+
+    @property
+    def object_acc_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_acc_w`."""
+        return self.object_com_acc_w
+
+    @property
+    def object_lin_acc_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_lin_acc_w`."""
+        return self.object_com_lin_acc_w
+
+    @property
+    def object_ang_acc_w(self) -> torch.Tensor:
+        """Same as :attr:`object_com_ang_acc_w`."""
+        return self.object_com_ang_acc_w
+
+    @property
+    def com_pos_b(self) -> torch.Tensor:
+        """Same as :attr:`object_com_pos_b`."""
+        return self.object_com_pos_b
+
+    @property
+    def com_quat_b(self) -> torch.Tensor:
+        """Same as :attr:`object_com_quat_b`."""
+        return self.object_com_quat_b
+
+    ##
+    # Helpers.
+    ##
+
+    def _reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor:
+        """Reshapes and arranges the data from the physics view to (num_instances, num_objects, data_size).
+
+        Args:
+            data: The data from the physics view. Shape is (num_instances * num_objects, data_size).
+
+        Returns:
+            The reshaped data. Shape is (num_instances, num_objects, data_size).
+ """ + return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py new file mode 100644 index 00000000000..b458b15b403 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py @@ -0,0 +1,783 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch +import warp as wp + +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +import isaaclab.utils.string as string_utils +from isaaclab.utils.wrench_composer import WrenchComposer + +from ..asset_base import AssetBase +from .rigid_object_collection_data import RigidObjectCollectionData + +if TYPE_CHECKING: + from .rigid_object_collection_cfg import RigidObjectCollectionCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RigidObjectCollection(AssetBase): + """A rigid object collection class. + + This class represents a collection of rigid objects in the simulation, where the state of the + rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. + + For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ + applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the + simulation, the physics engine will automatically register the rigid bodies and create a corresponding + rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + + Rigid objects in the collection are uniquely identified via the key of the dictionary + :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the + :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. + This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by + the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid + object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary + could contain the same rigid object multiple times, leading to ambiguity. + + .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html + """ + + cfg: RigidObjectCollectionCfg + """Configuration instance for the rigid object collection.""" + + def __init__(self, cfg: RigidObjectCollectionCfg): + """Initialize the rigid object collection. + + Args: + cfg: A configuration instance. + """ + # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. 
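+        # Because `AssetBase.__init__` is skipped, the usual bookkeeping has to be repeated here by hand:
+        # config validation, spawning (once per rigid object in the collection rather than once per asset),
+        # and the callback registration performed at the end of this constructor.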
+ # check that the config is valid + cfg.validate() + # store inputs + self.cfg = cfg.copy() + # flag for whether the asset is initialized + self._is_initialized = False + self._prim_paths = [] + # spawn the rigid objects + for rigid_object_cfg in self.cfg.rigid_objects.values(): + # check if the rigid object path is valid + # note: currently the spawner does not work if there is a regex pattern in the leaf + # For example, if the prim path is "/World/Object_[1,2]" since the spawner will not + # know which prim to spawn. This is a limitation of the spawner and not the asset. + asset_path = rigid_object_cfg.prim_path.split("/")[-1] + asset_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", asset_path) is None + # spawn the asset + if rigid_object_cfg.spawn is not None and not asset_path_is_regex: + rigid_object_cfg.spawn.func( + rigid_object_cfg.prim_path, + rigid_object_cfg.spawn, + translation=rigid_object_cfg.init_state.pos, + orientation=rigid_object_cfg.init_state.rot, + ) + # check that spawn was successful + matching_prims = sim_utils.find_matching_prims(rigid_object_cfg.prim_path) + if len(matching_prims) == 0: + raise RuntimeError(f"Could not find prim with path {rigid_object_cfg.prim_path}.") + self._prim_paths.append(rigid_object_cfg.prim_path) + # stores object names + self._object_names_list = [] + + # register various callback functions + self._register_callbacks() + self._debug_vis_handle = None + + """ + Properties + """ + + @property + def data(self) -> RigidObjectCollectionData: + return self._data + + @property + def num_instances(self) -> int: + """Number of instances of the collection.""" + return self.root_physx_view.count // self.num_objects + + @property + def num_objects(self) -> int: + """Number of objects in the collection. + + This corresponds to the distinct number of rigid bodies in the collection. + """ + return len(self.object_names) + + @property + def object_names(self) -> list[str]: + """Ordered names of objects in the rigid object collection.""" + return self._object_names_list + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Rigid body view for the rigid body collection (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._root_physx_view # type: ignore + + @property + def instantaneous_wrench_composer(self) -> WrenchComposer: + """Instantaneous wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set + to this object are discarded. This is useful to apply forces that change all the time, things like drag forces + for instance. + """ + return self._instantaneous_wrench_composer + + @property + def permanent_wrench_composer(self) -> WrenchComposer: + """Permanent wrench composer. + + Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench + composer are persistent and are applied to the simulation at every step. This is useful to apply forces that + are constant over a period of time, things like the thrust of a motor for instance. + """ + return self._permanent_wrench_composer + + """ + Operations. + """ + + def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.Tensor | None = None): + """Resets all internal buffers of selected environments and objects. 
+ + Args: + env_ids: The indices of the object to reset. Defaults to None (all instances). + object_ids: The indices of the object to reset. Defaults to None (all objects). + """ + # resolve all indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + # reset external wrench + self._instantaneous_wrench_composer.reset(env_ids) + self._permanent_wrench_composer.reset(env_ids) + + def write_data_to_sim(self): + """Write external wrench to the simulation. + + Note: + We write external wrench to the simulation here since this function is called before the simulation step. + This ensures that the external wrench is applied at every simulation step. + """ + # write external wrench + if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: + if self._instantaneous_wrench_composer.active: + # Compose instantaneous wrench with permanent wrench + self._instantaneous_wrench_composer.add_forces_and_torques( + forces=self._permanent_wrench_composer.composed_force, + torques=self._permanent_wrench_composer.composed_torque, + body_ids=self._ALL_OBJ_INDICES_WP, + env_ids=self._ALL_ENV_INDICES_WP, + ) + # Apply both instantaneous and permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_force_as_torch), + torque_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_torque_as_torch), + position_data=None, + indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + is_global=False, + ) + else: + # Apply permanent wrench to the simulation + self.root_physx_view.apply_forces_and_torques_at_position( + force_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_force_as_torch), + torque_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_torque_as_torch), + position_data=None, + indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + is_global=False, + ) + self._instantaneous_wrench_composer.reset() + + def update(self, dt: float): + self._data.update(dt) + + """ + Operations - Finders. + """ + + def find_objects( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str]]: + """Find objects in the collection based on the name keys. + + Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more + information on the name matching. + + Args: + name_keys: A regular expression or a list of regular expressions to match the object names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple containing the object indices and names. + """ + obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.object_names, preserve_order) + return torch.tensor(obj_ids, device=self.device), obj_names + + """ + Operations - Write to simulation. + """ + + def write_object_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object state over selected environment and object indices into the simulation. + + The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. 
+ + Args: + object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) + self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) + + def write_object_com_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object center of mass state over selected environment indices into the simulation. + + The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + self.write_object_com_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) + self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) + + def write_object_link_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object link state over selected environment indices into the simulation. + + The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear + and angular velocity. All the quantities are in the simulation frame. + + Args: + object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) + self.write_object_link_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) + + def write_object_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object pose over selected environment and object indices into the simulation. + + The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + self.write_object_link_pose_to_sim(object_pose, env_ids=env_ids, object_ids=object_ids) + + def write_object_link_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object pose over selected environment and object indices into the simulation. + + The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + + Args: + object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. 
+ """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- object_ids + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self._data.object_link_pose_w[env_ids[:, None], object_ids] = object_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._object_link_state_w.data is not None: + self._data.object_link_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() + if self._data._object_state_w.data is not None: + self._data.object_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() + if self._data._object_com_state_w.data is not None: + # get CoM pose in link frame + com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] + com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] + com_pos, com_quat = math_utils.combine_frame_transforms( + object_pose[..., :3], + object_pose[..., 3:7], + com_pos_b, + com_quat_b, + ) + self._data.object_com_state_w[env_ids[:, None], object_ids, :3] = com_pos + self._data.object_com_state_w[env_ids[:, None], object_ids, 3:7] = com_quat + + # convert the quaternion from wxyz to xyzw + poses_xyzw = self._data.object_link_pose_w.clone() + poses_xyzw[..., 3:] = math_utils.convert_quat(poses_xyzw[..., 3:], to="xyzw") + + # set into simulation + view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) + self.root_physx_view.set_transforms(self.reshape_data_to_view(poses_xyzw), indices=view_ids) + + def write_object_com_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object center of mass pose over selected environment indices into the simulation. + + The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). + The orientation is the orientation of the principle axes of inertia. + + Args: + object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- object_ids + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + + # set into internal buffers + self._data.object_com_pose_w[env_ids[:, None], object_ids] = object_pose.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. 
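+        # (the lazily-initialized buffers are expected to keep `.data` as None until their property is
+        #  first queried, which is what the `is not None` checks below rely on)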
+ if self._data._object_com_state_w.data is not None: + self._data.object_com_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() + + # get CoM pose in link frame + com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] + com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] + # transform input CoM pose to link frame + object_link_pos, object_link_quat = math_utils.combine_frame_transforms( + object_pose[..., :3], + object_pose[..., 3:7], + math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), + math_utils.quat_inv(com_quat_b), + ) + + # write transformed pose in link frame to sim + object_link_pose = torch.cat((object_link_pos, object_link_quat), dim=-1) + self.write_object_link_pose_to_sim(object_link_pose, env_ids=env_ids, object_ids=object_ids) + + def write_object_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object velocity over selected environment and object indices into the simulation. + + Args: + object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + self.write_object_com_velocity_to_sim(object_velocity, env_ids=env_ids, object_ids=object_ids) + + def write_object_com_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object center of mass velocity over selected environment and object indices into the simulation. + + Args: + object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- object_ids + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + + # note: we need to do this here since tensors are not set into simulation until step. + # set into internal buffers + self._data.object_com_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._object_com_state_w.data is not None: + self._data.object_com_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() + if self._data._object_state_w.data is not None: + self._data.object_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() + if self._data._object_link_state_w.data is not None: + self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() + # make the acceleration zero to prevent reporting old values + self._data.object_com_acc_w[env_ids[:, None], object_ids] = 0.0 + + # set into simulation + view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) + self.root_physx_view.set_velocities(self.reshape_data_to_view(self._data.object_com_vel_w), indices=view_ids) + + def write_object_link_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Set the object link velocity over selected environment indices into the simulation. 
+ + The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. + NOTE: This sets the velocity of the object's frame rather than the objects center of mass. + + Args: + object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). + env_ids: Environment indices. If None, then all indices are used. + object_ids: Object indices. If None, then all indices are used. + """ + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + # -- object_ids + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES + + # set into internal buffers + self._data.object_link_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() + # update these buffers only if the user is using them. Otherwise this adds to overhead. + if self._data._object_link_state_w.data is not None: + self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() + + # get CoM pose in link frame + quat = self.data.object_link_quat_w[env_ids[:, None], object_ids] + com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] + # transform input velocity to center of mass frame + object_com_velocity = object_velocity.clone() + object_com_velocity[..., :3] += torch.linalg.cross( + object_com_velocity[..., 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 + ) + + # write center of mass velocity to sim + self.write_object_com_velocity_to_sim(object_com_velocity, env_ids=env_ids, object_ids=object_ids) + + """ + Operations - Setters. + """ + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, + is_global: bool = False, + ): + """Set external force and torque to apply on the objects' bodies in their local frame. + + For many applications, we want to keep the applied external force on rigid bodies constant over a period of + time (for instance, during the policy control). This function allows us to store the external force and torque + into buffers which are then applied to the simulation at every step. + + .. caution:: + If the function is called with empty forces and torques, then this function disables the application + of external wrench to the simulation. + + .. code-block:: python + + # example of disabling external wrench + asset.set_external_force_and_torque(forces=torch.zeros(0, 0, 3), torques=torch.zeros(0, 0, 3)) + + .. note:: + This function does not apply the external wrench to the simulation. It only fills the buffers with + the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function + right before the simulation step. + + Args: + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). + object_ids: Object indices to apply external wrench to. Defaults to None (all objects). + env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). + is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, + the external wrench is applied in the link frame of the bodies. 
+ """ + logger.warning( + "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" + " use 'permanent_wrench_composer.set_forces_and_torques' instead." + ) + + if forces is None and torques is None: + logger.warning("No forces or torques provided. No permanent external wrench will be applied.") + + # resolve all indices + # -- env_ids + if env_ids is None: + env_ids = self._ALL_ENV_INDICES_WP + elif not isinstance(env_ids, torch.Tensor): + env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + # -- object_ids + if object_ids is None: + object_ids = self._ALL_OBJ_INDICES_WP + elif isinstance(object_ids, slice): + object_ids = wp.from_torch( + torch.arange(self.num_objects, dtype=torch.int32, device=self.device)[object_ids], dtype=wp.int32 + ) + elif not isinstance(object_ids, torch.Tensor): + object_ids = wp.array(object_ids, dtype=wp.int32, device=self.device) + else: + object_ids = wp.from_torch(object_ids.to(torch.int32), dtype=wp.int32) + + # Write to wrench composer + self._permanent_wrench_composer.set_forces_and_torques( + forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, + torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, + positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, + body_ids=object_ids, + env_ids=env_ids, + is_global=is_global, + ) + + """ + Helper functions. + """ + + def reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: + """Reshapes and arranges the data coming from the :attr:`root_physx_view` to + (num_instances, num_objects, data_dim). + + Args: + data: The data coming from the :attr:`root_physx_view`. Shape is (num_instances * num_objects, data_dim). + + Returns: + The reshaped data. Shape is (num_instances, num_objects, data_dim). + """ + return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) + + def reshape_data_to_view(self, data: torch.Tensor) -> torch.Tensor: + """Reshapes and arranges the data to the be consistent with data from the :attr:`root_physx_view`. + + Args: + data: The data to be reshaped. Shape is (num_instances, num_objects, data_dim). + + Returns: + The reshaped data. Shape is (num_instances * num_objects, data_dim). + """ + return torch.einsum("ijk -> jik", data).reshape(self.num_objects * self.num_instances, *data.shape[2:]) + + """ + Internal helper. + """ + + def _initialize_impl(self): + # clear object names list to prevent double counting on re-initialization + self._object_names_list.clear() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + root_prim_path_exprs = [] + for name, rigid_object_cfg in self.cfg.rigid_objects.items(): + # obtain the first prim in the regex expression (all others are assumed to be a copy of this) + template_prim = sim_utils.find_first_matching_prim(rigid_object_cfg.prim_path) + if template_prim is None: + raise RuntimeError(f"Failed to find prim for expression: '{rigid_object_cfg.prim_path}'.") + template_prim_path = template_prim.GetPath().pathString + + # find rigid root prims + root_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), + traverse_instance_prims=False, + ) + if len(root_prims) == 0: + raise RuntimeError( + f"Failed to find a rigid body when resolving '{rigid_object_cfg.prim_path}'." 
+ " Please ensure that the prim has 'USD RigidBodyAPI' applied." + ) + if len(root_prims) > 1: + raise RuntimeError( + f"Failed to find a single rigid body when resolving '{rigid_object_cfg.prim_path}'." + f" Found multiple '{root_prims}' under '{template_prim_path}'." + " Please ensure that there is only one rigid body in the prim path tree." + ) + + # check that no rigid object has an articulation root API, which decreases simulation performance + articulation_prims = sim_utils.get_all_matching_child_prims( + template_prim_path, + predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), + traverse_instance_prims=False, + ) + if len(articulation_prims) != 0: + if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): + raise RuntimeError( + f"Found an articulation root when resolving '{rigid_object_cfg.prim_path}' in the rigid object" + f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." + " Please disable the articulation root in the USD or from code by setting the parameter" + " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." + ) + + # resolve root prim back into regex expression + root_prim_path = root_prims[0].GetPath().pathString + root_prim_path_expr = rigid_object_cfg.prim_path + root_prim_path[len(template_prim_path) :] + root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) + + self._object_names_list.append(name) + + # -- object view + self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) + + # check if the rigid body was created + if self._root_physx_view._backend is None: + raise RuntimeError("Failed to create rigid body collection. Please check PhysX logs.") + + # log information about the rigid body + logger.info(f"Number of instances: {self.num_instances}") + logger.info(f"Number of distinct objects: {self.num_objects}") + logger.info(f"Object names: {self.object_names}") + + # container for data access + self._data = RigidObjectCollectionData(self.root_physx_view, self.num_objects, self.device) + + # create buffers + self._create_buffers() + # process configuration + self._process_cfg() + # update the rigid body data + self.update(0.0) + + def _create_buffers(self): + """Create buffers for storing data.""" + # constants + self._ALL_ENV_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) + self._ALL_OBJ_INDICES = torch.arange(self.num_objects, dtype=torch.long, device=self.device) + self._ALL_ENV_INDICES_WP = wp.from_torch(self._ALL_ENV_INDICES.to(torch.int32), dtype=wp.int32) + self._ALL_OBJ_INDICES_WP = wp.from_torch(self._ALL_OBJ_INDICES.to(torch.int32), dtype=wp.int32) + + # external wrench composer + self._instantaneous_wrench_composer = WrenchComposer(self) + self._permanent_wrench_composer = WrenchComposer(self) + + # set information about rigid body into data + self._data.object_names = self.object_names + self._data.default_mass = self.reshape_view_to_data(self.root_physx_view.get_masses().clone()) + self._data.default_inertia = self.reshape_view_to_data(self.root_physx_view.get_inertias().clone()) + + def _process_cfg(self): + """Post processing of configuration parameters.""" + # default state + # -- object state + default_object_states = [] + for rigid_object_cfg in self.cfg.rigid_objects.values(): + default_object_state = ( + tuple(rigid_object_cfg.init_state.pos) + + tuple(rigid_object_cfg.init_state.rot) + + tuple(rigid_object_cfg.init_state.lin_vel) + + 
tuple(rigid_object_cfg.init_state.ang_vel) + ) + default_object_state = ( + torch.tensor(default_object_state, dtype=torch.float, device=self.device) + .repeat(self.num_instances, 1) + .unsqueeze(1) + ) + default_object_states.append(default_object_state) + # concatenate the default state for each object + default_object_states = torch.cat(default_object_states, dim=1) + self._data.default_object_state = default_object_states + + def _env_obj_ids_to_view_ids( + self, env_ids: torch.Tensor, object_ids: Sequence[int] | slice | torch.Tensor + ) -> torch.Tensor: + """Converts environment and object indices to indices consistent with data from :attr:`root_physx_view`. + + Args: + env_ids: Environment indices. + object_ids: Object indices. + + Returns: + The view indices. + """ + # the order is env_0/object_0, env_0/object_1, env_0/object_..., env_1/object_0, env_1/object_1, ... + # return a flat tensor of indices + if isinstance(object_ids, slice): + object_ids = self._ALL_OBJ_INDICES + elif isinstance(object_ids, Sequence): + object_ids = torch.tensor(object_ids, device=self.device) + return (object_ids.unsqueeze(1) * self.num_instances + env_ids).flatten() + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._root_physx_view = None + + def _on_prim_deletion(self, prim_path: str) -> None: + """Invalidates and deletes the callbacks when the prim is deleted. + + Args: + prim_path: The path to the prim that is being deleted. + + Note: + This function is called when the prim is deleted. + """ + if prim_path == "/": + self._clear_callbacks() + return + for prim_path_expr in self._prim_paths: + result = re.match( + pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path + ) + if result: + self._clear_callbacks() + return diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py similarity index 84% rename from source/isaaclab/isaaclab/assets/surface_gripper/__init__.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py index 3786976617c..856946e9ecd 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py @@ -7,3 +7,8 @@ from .surface_gripper import SurfaceGripper from .surface_gripper_cfg import SurfaceGripperCfg + +__all__ = [ + "SurfaceGripper", + "SurfaceGripperCfg", +] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py similarity index 100% rename from source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py diff --git a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py similarity index 94% rename from source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py rename to source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py index 6648e183e2f..8b13b80195b 100644 --- a/source/isaaclab/isaaclab/assets/surface_gripper/surface_gripper_cfg.py +++ 
b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py @@ -7,7 +7,7 @@ from isaaclab.utils import configclass -from ..asset_base_cfg import AssetBaseCfg +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from .surface_gripper import SurfaceGripper diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py new file mode 100644 index 00000000000..1e9273803a0 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py @@ -0,0 +1,47 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing various sensor classes implementations. + +This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both +USD-based and custom sensors: + +* **USD-prim sensors**: Available in Omniverse and require creating a USD prim for them. + For instance, RTX ray tracing camera and lidar sensors. +* **USD-schema sensors**: Available in Omniverse and require creating a USD schema on an existing prim. + For instance, contact sensors and frame transformers. +* **Custom sensors**: Implemented in Python and do not require creating any USD prim or schema. + For instance, warp-based ray-casters. + +Due to the above categorization, the prim paths passed to the sensor's configuration class +are interpreted differently based on the sensor type. The following table summarizes the +interpretation of the prim paths for different sensor types: + ++---------------------+---------------------------+---------------------------------------------------------------+ +| Sensor Type | Example Prim Path | Pre-check | ++=====================+===========================+===============================================================+ +| Camera | /World/robot/base/camera | Leaf is available, and it will spawn a USD camera | ++---------------------+---------------------------+---------------------------------------------------------------+ +| Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists | ++---------------------+---------------------------+---------------------------------------------------------------+ +| Ray Caster | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | ++---------------------+---------------------------+---------------------------------------------------------------+ +| Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | ++---------------------+---------------------------+---------------------------------------------------------------+ +| Imu | /World/robot/base | Leaf exists and is a physics body (Rigid Body) | ++---------------------+---------------------------+---------------------------------------------------------------+ +| Visuo-Tactile Sensor| /World/robot/base | Leaf exists and is a physics body (Rigid Body) | ++---------------------+---------------------------+---------------------------------------------------------------+ + +""" + +from .camera import * # noqa: F401, F403 +from .contact_sensor import * # noqa: F401, F403 +from .frame_transformer import * # noqa: F401 +from .imu import * # noqa: F401, F403 +from .ray_caster import * # noqa: F401, F403 +from .sensor_base import SensorBase # noqa: F401 +from .sensor_base_cfg import SensorBaseCfg # noqa: F401 +from 
.tacsl_sensor import * # noqa: F401, F403 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py new file mode 100644 index 00000000000..f2318067b58 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for camera wrapper around USD camera prim.""" + +from .camera import Camera +from .camera_cfg import CameraCfg +from .camera_data import CameraData +from .tiled_camera import TiledCamera +from .tiled_camera_cfg import TiledCameraCfg +from .utils import * # noqa: F401, F403 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py new file mode 100644 index 00000000000..b4fa558d713 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py @@ -0,0 +1,722 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import json +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any, Literal + +import numpy as np +import torch +from packaging import version + +import carb +import omni.usd +from pxr import Sdf, UsdGeom + +import isaaclab.sim as sim_utils +import isaaclab.utils.sensors as sensor_utils +from isaaclab.sim.views import XformPrimView +from isaaclab.utils import to_camel_case +from isaaclab.utils.array import convert_to_torch +from isaaclab.utils.math import ( + convert_camera_frame_orientation_convention, + create_rotation_matrix_from_view, + quat_from_matrix, +) +from isaaclab.utils.version import get_isaac_sim_version + +from ..sensor_base import SensorBase +from .camera_data import CameraData + +if TYPE_CHECKING: + from .camera_cfg import CameraCfg + +# import logger +logger = logging.getLogger(__name__) + + +class Camera(SensorBase): + r"""The camera sensor for acquiring visual data. + + This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data. + It ensures that the camera follows the ROS convention for the coordinate system. + + Summarizing from the `replicator extension`_, the following sensor types are supported: + + - ``"rgb"``: A 3-channel rendered color image. + - ``"rgba"``: A 4-channel rendered color image with alpha channel. + - ``"distance_to_camera"``: An image containing the distance to camera optical center. + - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. + - ``"depth"``: The same as ``"distance_to_image_plane"``. + - ``"normals"``: An image containing the local surface normal vectors at each pixel. + - ``"motion_vectors"``: An image containing the motion vector data at each pixel. + - ``"semantic_segmentation"``: The semantic segmentation data. + - ``"instance_segmentation_fast"``: The instance segmentation data. + - ``"instance_id_segmentation_fast"``: The instance id segmentation data. + + .. note:: + Currently the following sensor types are not supported in a "view" format: + + - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. 
+        - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead.
+        - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions).
+        - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions).
+        - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions).
+        - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions).
+        - ``"bounding_box_3d"``: The 3D view space bounding box data.
+        - ``"bounding_box_3d_fast"``: The 3D view space bounding box data.
+
+    .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output
+    .. _UsdGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html
+
+    """
+
+    cfg: CameraCfg
+    """The configuration parameters."""
+
+    UNSUPPORTED_TYPES: set[str] = {
+        "instance_id_segmentation",
+        "instance_segmentation",
+        "bounding_box_2d_tight",
+        "bounding_box_2d_loose",
+        "bounding_box_3d",
+        "bounding_box_2d_tight_fast",
+        "bounding_box_2d_loose_fast",
+        "bounding_box_3d_fast",
+    }
+    """The set of sensor types that are not supported by the camera class."""
+
+    def __init__(self, cfg: CameraCfg):
+        """Initializes the camera sensor.
+
+        Args:
+            cfg: The configuration parameters.
+
+        Raises:
+            RuntimeError: If no camera prim is found at the given path.
+            ValueError: If the provided data types are not supported by the camera.
+        """
+        # check if sensor path is valid
+        # note: currently we do not handle environment indices if there is a regex pattern in the leaf
+        # For example, if the prim path is "/World/Sensor_[1,2]".
+        sensor_path = cfg.prim_path.split("/")[-1]
+        sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None
+        if sensor_path_is_regex:
+            raise RuntimeError(
+                f"Invalid prim path for the camera sensor: {cfg.prim_path}."
+                "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf."
+ ) + # perform check on supported data types + self._check_supported_data_types(cfg) + # initialize base class + super().__init__(cfg) + + # toggle rendering of rtx sensors as True + # this flag is read by SimulationContext to determine if rtx sensors should be rendered + carb_settings_iface = carb.settings.get_settings() + carb_settings_iface.set_bool("/isaaclab/render/rtx_sensors", True) + + # spawn the asset + if self.cfg.spawn is not None: + # compute the rotation offset + rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0) + rot_offset = convert_camera_frame_orientation_convention( + rot, origin=self.cfg.offset.convention, target="opengl" + ) + rot_offset = rot_offset.squeeze(0).cpu().numpy() + # ensure vertical aperture is set, otherwise replace with default for squared pixels + if self.cfg.spawn.vertical_aperture is None: + self.cfg.spawn.vertical_aperture = self.cfg.spawn.horizontal_aperture * self.cfg.height / self.cfg.width + # spawn the asset + self.cfg.spawn.func( + self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset + ) + # check that spawn was successful + matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) + if len(matching_prims) == 0: + raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") + + # UsdGeom Camera prim for the sensor + self._sensor_prims: list[UsdGeom.Camera] = list() + # Create empty variables for storing output data + self._data = CameraData() + + # HACK: We need to disable instancing for semantic_segmentation and instance_segmentation_fast to work + # checks for Isaac Sim v4.5 as this issue exists there + if get_isaac_sim_version() == version.parse("4.5"): + if "semantic_segmentation" in self.cfg.data_types or "instance_segmentation_fast" in self.cfg.data_types: + logger.warning( + "Isaac Sim 4.5 introduced a bug in Camera and TiledCamera when outputting instance and semantic" + " segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets" + " will be disabled in the current workflow and may lead to longer load times and increased memory" + " usage." + ) + with Sdf.ChangeBlock(): + for prim in self.stage.Traverse(): + prim.SetInstanceable(False) + + def __del__(self): + """Unsubscribes from callbacks and detach from the replicator registry.""" + # unsubscribe callbacks + super().__del__() + # delete from replicator registry + for _, annotators in self._rep_registry.items(): + for annotator, render_product_path in zip(annotators, self._render_product_paths): + annotator.detach([render_product_path]) + annotator = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + # message for class + return ( + f"Camera @ '{self.cfg.prim_path}': \n" + f"\tdata types : {list(self.data.output.keys())} \n" + f"\tsemantic filter : {self.cfg.semantic_filter}\n" + f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" + f"\tcolorize instance segm. 
: {self.cfg.colorize_instance_segmentation}\n" + f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" + f"\tupdate period (s): {self.cfg.update_period}\n" + f"\tshape : {self.image_shape}\n" + f"\tnumber of sensors : {self._view.count}" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self._view.count + + @property + def data(self) -> CameraData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def frame(self) -> torch.tensor: + """Frame number when the measurement took place.""" + return self._frame + + @property + def render_product_paths(self) -> list[str]: + """The path of the render products for the cameras. + + This can be used via replicator interfaces to attach to writes or external annotator registry. + """ + return self._render_product_paths + + @property + def image_shape(self) -> tuple[int, int]: + """A tuple containing (height, width) of the camera sensor.""" + return (self.cfg.height, self.cfg.width) + + """ + Configuration + """ + + def set_intrinsic_matrices( + self, matrices: torch.Tensor, focal_length: float | None = None, env_ids: Sequence[int] | None = None + ): + """Set parameters of the USD camera from its intrinsic matrix. + + The intrinsic matrix is used to set the following parameters to the USD camera: + + - ``focal_length``: The focal length of the camera. + - ``horizontal_aperture``: The horizontal aperture of the camera. + - ``vertical_aperture``: The vertical aperture of the camera. + - ``horizontal_aperture_offset``: The horizontal offset of the camera. + - ``vertical_aperture_offset``: The vertical offset of the camera. + + .. warning:: + + Due to limitations of Omniverse camera, we need to assume that the camera is a spherical lens, + i.e. has square pixels, and the optical center is centered at the camera eye. If this assumption + is not true in the input intrinsic matrix, then the camera will not set up correctly. + + Args: + matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). + focal_length: Perspective focal length (in cm) used to calculate pixel size. Defaults to None. If None, + focal_length will be calculated 1 / width. + env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. + """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # convert matrices to numpy tensors + if isinstance(matrices, torch.Tensor): + matrices = matrices.cpu().numpy() + else: + matrices = np.asarray(matrices, dtype=float) + # iterate over env_ids + for i, intrinsic_matrix in zip(env_ids, matrices): + height, width = self.image_shape + + params = sensor_utils.convert_camera_intrinsics_to_usd( + intrinsic_matrix=intrinsic_matrix.reshape(-1), height=height, width=width, focal_length=focal_length + ) + + # change data for corresponding camera index + sensor_prim = self._sensor_prims[i] + # set parameters for camera + for param_name, param_value in params.items(): + # convert to camel case (CC) + param_name = to_camel_case(param_name, to="CC") + # get attribute from the class + param_attr = getattr(sensor_prim, f"Get{param_name}Attr") + # set value + # note: We have to do it this way because the camera might be on a different + # layer (default cameras are on session layer), and this is the simplest + # way to set the property on the right layer. 
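+                # (setting the attribute directly via `param_attr().Set(...)` would author the value on the
+                #  stage's current edit target instead, which may not be the layer that owns the camera prim)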
+                omni.usd.set_prop_val(param_attr(), param_value)
+        # update the internal buffers
+        self._update_intrinsic_matrices(env_ids)
+
+    """
+    Operations - Set pose.
+    """
+
+    def set_world_poses(
+        self,
+        positions: torch.Tensor | None = None,
+        orientations: torch.Tensor | None = None,
+        env_ids: Sequence[int] | None = None,
+        convention: Literal["opengl", "ros", "world"] = "ros",
+    ):
+        r"""Set the pose of the camera w.r.t. the world frame using specified convention.
+
+        Since different fields use different conventions for camera orientations, the method allows users to
+        set the camera poses in the specified convention. Possible conventions are:
+
+        - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
+        - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
+        - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention
+
+        See :meth:`isaaclab.sensors.camera.utils.convert_camera_frame_orientation_convention` for more details
+        on the conventions.
+
+        Args:
+            positions: The cartesian coordinates (in meters). Shape is (N, 3).
+                Defaults to None, in which case the camera position is not changed.
+            orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
+                Defaults to None, in which case the camera orientation is not changed.
+            env_ids: The sensor indices to manipulate. Defaults to None, which means all sensor indices.
+            convention: The convention in which the poses are fed. Defaults to "ros".
+
+        Raises:
+            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
+        """
+        # resolve env_ids
+        if env_ids is None:
+            env_ids = self._ALL_INDICES
+        # convert to backend tensor
+        if positions is not None:
+            if isinstance(positions, np.ndarray):
+                positions = torch.from_numpy(positions).to(device=self._device)
+            elif not isinstance(positions, torch.Tensor):
+                positions = torch.tensor(positions, device=self._device)
+        # convert rotation matrix from input convention to OpenGL
+        if orientations is not None:
+            if isinstance(orientations, np.ndarray):
+                orientations = torch.from_numpy(orientations).to(device=self._device)
+            elif not isinstance(orientations, torch.Tensor):
+                orientations = torch.tensor(orientations, device=self._device)
+            orientations = convert_camera_frame_orientation_convention(orientations, origin=convention, target="opengl")
+        # set the pose
+        self._view.set_world_poses(positions, orientations, env_ids)
+
+    def set_world_poses_from_view(
+        self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
+    ):
+        """Set the poses of the camera from the eye position and look-at target position.
+
+        Args:
+            eyes: The positions of the camera's eye. Shape is (N, 3).
+            targets: The target locations to look at. Shape is (N, 3).
+            env_ids: The sensor indices to manipulate. Defaults to None, which means all sensor indices.
+
+        Raises:
+            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
+            NotImplementedError: If the stage up-axis is not "Y" or "Z".
+ """ + # resolve env_ids + if env_ids is None: + env_ids = self._ALL_INDICES + # get up axis of current stage + up_axis = UsdGeom.GetStageUpAxis(self.stage) + # set camera poses using the view + orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, up_axis, device=self._device)) + self._view.set_world_poses(eyes, orientations, env_ids) + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + if not self._is_initialized: + raise RuntimeError( + "Camera could not be initialized. Please ensure --enable_cameras is used to enable rendering." + ) + # reset the timestamps + super().reset(env_ids) + # resolve None + # note: cannot do smart indexing here since we do a for loop over data. + if env_ids is None: + env_ids = self._ALL_INDICES + # reset the data + # note: this recomputation is useful if one performs events such as randomizations on the camera poses. + self._update_poses(env_ids) + # Reset the frame count + self._frame[env_ids] = 0 + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + This function creates handles and registers the provided data types with the replicator registry to + be able to access the data from the sensor. It also initializes the internal buffers to store the data. + + Raises: + RuntimeError: If the number of camera prims in the view does not match the number of environments. + RuntimeError: If replicator was not found. + """ + carb_settings_iface = carb.settings.get_settings() + if not carb_settings_iface.get("/isaaclab/cameras_enabled"): + raise RuntimeError( + "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" + " rendering." + ) + + import omni.replicator.core as rep + from omni.syntheticdata.scripts.SyntheticData import SyntheticData + + # Initialize parent class + super()._initialize_impl() + # Create a view for the sensor with Fabric enabled for fast pose querie, otherwise position will be staling + self._view = XformPrimView( + self.cfg.prim_path, device=self._device, stage=self.stage, sync_usd_on_fabric_write=True + ) + # Check that sizes are correct + if self._view.count != self._num_envs: + raise RuntimeError( + f"Number of camera prims in the view ({self._view.count}) does not match" + f" the number of environments ({self._num_envs})." 
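A short usage sketch for the pose setters above, assuming ``camera`` is an already-initialized sensor with two instances; the positions and quaternions are illustrative:

.. code-block:: python

    import torch

    # illustrative poses for two camera instances; quaternions are (w, x, y, z)
    positions = torch.tensor([[2.0, 0.0, 1.5], [2.0, 1.0, 1.5]])
    orientations = torch.tensor([[1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]])
    camera.set_world_poses(positions, orientations, convention="ros")

    # alternatively, aim both cameras at the origin from the same eye positions
    camera.set_world_poses_from_view(eyes=positions, targets=torch.zeros(2, 3))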
+ ) + + # Create all env_ids buffer + self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) + # Create frame count buffer + self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) + + # Attach the sensor data types to render node + self._render_product_paths: list[str] = list() + self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types} + + # Convert all encapsulated prims to Camera + for cam_prim in self._view.prims: + # Obtain the prim path + cam_prim_path = cam_prim.GetPath().pathString + # Check if prim is a camera + if not cam_prim.IsA(UsdGeom.Camera): + raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") + # Add to list + sensor_prim = UsdGeom.Camera(cam_prim) + self._sensor_prims.append(sensor_prim) + + # Get render product + # From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path + render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height)) + if not isinstance(render_prod_path, str): + render_prod_path = render_prod_path.path + self._render_product_paths.append(render_prod_path) + + # Check if semantic types or semantic filter predicate is provided + if isinstance(self.cfg.semantic_filter, list): + semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*" + elif isinstance(self.cfg.semantic_filter, str): + semantic_filter_predicate = self.cfg.semantic_filter + else: + raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.") + # set the semantic filter predicate + # copied from rep.scripts.writes_default.basic_writer.py + SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate) + + # Iterate over each data type and create annotator + # TODO: This will move out of the loop once Replicator supports multiple render products within a single + # annotator, i.e.: rep_annotator.attach(self._render_product_paths) + for name in self.cfg.data_types: + # note: we are verbose here to make it easier to understand the code. + # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. + # if colorize is false, the data is returned as a uint32 image with ids as values. 
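The semantic filter predicate assembled above can be seen in isolation; for example, a list of semantic types collapses into a single filter string:

.. code-block:: python

    # a list of semantic types becomes a disjunctive filter predicate
    semantic_filter = ["class", "color"]
    predicate = ":*; ".join(semantic_filter) + ":*"
    assert predicate == "class:*; color:*"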
+ if name == "semantic_segmentation": + init_params = { + "colorize": self.cfg.colorize_semantic_segmentation, + "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), + } + elif name == "instance_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_segmentation} + elif name == "instance_id_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} + else: + init_params = None + + # Resolve device name + if "cuda" in self._device: + device_name = self._device.split(":")[0] + else: + device_name = "cpu" + + # Map special cases to their corresponding annotator names + special_cases = {"rgba": "rgb", "depth": "distance_to_image_plane"} + # Get the annotator name, falling back to the original name if not a special case + annotator_name = special_cases.get(name, name) + # Create the annotator node + rep_annotator = rep.AnnotatorRegistry.get_annotator(annotator_name, init_params, device=device_name) + + # attach annotator to render product + rep_annotator.attach(render_prod_path) + # add to registry + self._rep_registry[name].append(rep_annotator) + + # Create internal buffers + self._create_buffers() + self._update_intrinsic_matrices(self._ALL_INDICES) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + # Increment frame count + self._frame[env_ids] += 1 + # -- pose + if self.cfg.update_latest_camera_pose: + self._update_poses(env_ids) + # -- read the data from annotator registry + # check if buffer is called for the first time. If so then, allocate the memory + if len(self._data.output) == 0: + # this is the first time buffer is called + # it allocates memory for all the sensors + self._create_annotator_data() + else: + # iterate over all the data types + for name, annotators in self._rep_registry.items(): + # iterate over all the annotators + for index in env_ids: + # get the output + output = annotators[index].get_data() + # process the output + data, info = self._process_annotator_output(name, output) + # add data to output + self._data.output[name][index] = data + # add info to output + self._data.info[index][name] = info + # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, + # the replicator depth clipping is applied w.r.t. to the image plane which may result in values + # larger than the clipping range in the output. We apply an additional clipping to ensure values + # are within the clipping range for all the annotators. 
+ if name == "distance_to_camera": + self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf + # apply defined clipping behavior + if ( + name == "distance_to_camera" or name == "distance_to_image_plane" + ) and self.cfg.depth_clipping_behavior != "none": + self._data.output[name][torch.isinf(self._data.output[name])] = ( + 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] + ) + + """ + Private Helpers + """ + + def _check_supported_data_types(self, cfg: CameraCfg): + """Checks if the data types are supported by the ray-caster camera.""" + # check if there is any intersection in unsupported types + # reason: these use np structured data types which we can't yet convert to torch tensor + common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES + if common_elements: + # provide alternative fast counterparts + fast_common_elements = [] + for item in common_elements: + if "instance_segmentation" in item or "instance_id_segmentation" in item: + fast_common_elements.append(item + "_fast") + # raise error + raise ValueError( + f"Camera class does not support the following sensor types: {common_elements}." + "\n\tThis is because these sensor types output numpy structured data types which" + "can't be converted to torch tensors easily." + "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." + f"\n\t\tFast counterparts: {fast_common_elements}" + ) + + def _create_buffers(self): + """Create buffers for storing data.""" + # create the data object + # -- pose of the cameras + self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) + self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) + # -- intrinsic matrix + self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) + self._data.image_shape = self.image_shape + # -- output data + # lazy allocation of data dictionary + # since the size of the output data is not known in advance, we leave it as None + # the memory will be allocated when the buffer() function is called for the first time. + self._data.output = {} + self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)] + + def _update_intrinsic_matrices(self, env_ids: Sequence[int]): + """Compute camera's matrix of intrinsic parameters. + + Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. + + Note: + The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. + The coordinates of points on the image plane are in the homogeneous representation. 
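With illustrative attribute values, the calibration matrix assembled in the method body below works out as follows (the numbers are examples, not defaults):

.. code-block:: python

    # illustrative USD camera attributes for a 640x480 sensor
    width, height = 640, 480
    focal_length, horiz_aperture = 24.0, 20.955

    f_x = (width * focal_length) / horiz_aperture  # ~733.0 pixels
    f_y = f_x                                      # square pixels
    c_x, c_y = width * 0.5, height * 0.5           # (320.0, 240.0)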
+ """ + # iterate over all cameras + for i in env_ids: + # Get corresponding sensor prim + sensor_prim = self._sensor_prims[i] + # get camera parameters + # currently rendering does not use aperture offsets or vertical aperture + focal_length = sensor_prim.GetFocalLengthAttr().Get() + horiz_aperture = sensor_prim.GetHorizontalApertureAttr().Get() + + # get viewport parameters + height, width = self.image_shape + # extract intrinsic parameters + f_x = (width * focal_length) / horiz_aperture + f_y = f_x + c_x = width * 0.5 + c_y = height * 0.5 + # create intrinsic matrix for depth linear + self._data.intrinsic_matrices[i, 0, 0] = f_x + self._data.intrinsic_matrices[i, 0, 2] = c_x + self._data.intrinsic_matrices[i, 1, 1] = f_y + self._data.intrinsic_matrices[i, 1, 2] = c_y + self._data.intrinsic_matrices[i, 2, 2] = 1 + + def _update_poses(self, env_ids: Sequence[int]): + """Computes the pose of the camera in the world frame with ROS convention. + + This methods uses the ROS convention to resolve the input pose. In this convention, + we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis. + + Returns: + A tuple of the position (in meters) and quaternion (w, x, y, z). + """ + # check camera prim exists + if len(self._sensor_prims) == 0: + raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.") + + # get the poses from the view + poses, quat = self._view.get_world_poses(env_ids) + self._data.pos_w[env_ids] = poses + self._data.quat_w_world[env_ids] = convert_camera_frame_orientation_convention( + quat, origin="opengl", target="world" + ) + + def _create_annotator_data(self): + """Create the buffers to store the annotator data. + + We create a buffer for each annotator and store the data in a dictionary. Since the data + shape is not known beforehand, we create a list of buffers and concatenate them later. + + This is an expensive operation and should be called only once. + """ + # add data from the annotators + for name, annotators in self._rep_registry.items(): + # create a list to store the data for each annotator + data_all_cameras = list() + # iterate over all the annotators + for index in self._ALL_INDICES: + # get the output + output = annotators[index].get_data() + # process the output + data, info = self._process_annotator_output(name, output) + # append the data + data_all_cameras.append(data) + # store the info + self._data.info[index][name] = info + # concatenate the data along the batch dimension + self._data.output[name] = torch.stack(data_all_cameras, dim=0) + # NOTE: `distance_to_camera` and `distance_to_image_plane` are not both clipped to the maximum defined + # in the clipping range. The clipping is applied only to `distance_to_image_plane` and then both + # outputs are only clipped where the values in `distance_to_image_plane` exceed the threshold. To + # have a unified behavior between all cameras, we clip both outputs to the maximum value defined. + if name == "distance_to_camera": + self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf + # clip the data if needed + if ( + name == "distance_to_camera" or name == "distance_to_image_plane" + ) and self.cfg.depth_clipping_behavior != "none": + self._data.output[name][torch.isinf(self._data.output[name])] = ( + 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] + ) + + def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: + """Process the annotator output. 
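The depth clipping applied above can be reproduced on a toy depth tensor; the far-plane value and the behavior flag are illustrative:

.. code-block:: python

    import torch

    # illustrative depth image with a 10 m far clipping plane
    far_plane = 10.0
    depth = torch.tensor([[1.5, 12.0], [float("inf"), 4.0]])

    depth[depth > far_plane] = torch.inf  # out-of-range values become inf
    depth_clipping_behavior = "zero"      # one of "max", "zero", "none"
    if depth_clipping_behavior != "none":
        depth[torch.isinf(depth)] = 0.0 if depth_clipping_behavior == "zero" else far_plane
    # depth is now [[1.5, 0.0], [0.0, 4.0]]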
+ + This function is called after the data has been collected from all the cameras. + """ + # extract info and data from the output + if isinstance(output, dict): + data = output["data"] + info = output["info"] + else: + data = output + info = None + # convert data into torch tensor + data = convert_to_torch(data, device=self.device) + + # process data for different segmentation types + # Note: Replicator returns raw buffers of dtype int32 for segmentation types + # so we need to convert them to uint8 4 channel images for colorized types + height, width = self.image_shape + if name == "semantic_segmentation": + if self.cfg.colorize_semantic_segmentation: + data = data.view(torch.uint8).reshape(height, width, -1) + else: + data = data.view(height, width, 1) + elif name == "instance_segmentation_fast": + if self.cfg.colorize_instance_segmentation: + data = data.view(torch.uint8).reshape(height, width, -1) + else: + data = data.view(height, width, 1) + elif name == "instance_id_segmentation_fast": + if self.cfg.colorize_instance_id_segmentation: + data = data.view(torch.uint8).reshape(height, width, -1) + else: + data = data.view(height, width, 1) + # make sure buffer dimensions are consistent as (H, W, C) + elif name == "distance_to_camera" or name == "distance_to_image_plane" or name == "depth": + data = data.view(height, width, 1) + # we only return the RGB channels from the RGBA output if rgb is required + # normals return (x, y, z) in first 3 channels, 4th channel is unused + elif name == "rgb" or name == "normals": + data = data[..., :3] + # motion vectors return (x, y) in first 2 channels, 3rd and 4th channels are unused + elif name == "motion_vectors": + data = data[..., :2] + + # return the data and info + return data, info + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py new file mode 100644 index 00000000000..8fd9f307d18 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py @@ -0,0 +1,143 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING +from typing import Literal + +from isaaclab.sim import FisheyeCameraCfg, PinholeCameraCfg +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .camera import Camera + + +@configclass +class CameraCfg(SensorBaseCfg): + """Configuration for a camera sensor.""" + + @configclass + class OffsetCfg: + """The offset pose of the sensor's frame from the sensor's parent frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + convention: Literal["opengl", "ros", "world"] = "ros" + """The convention in which the frame offset is applied. Defaults to "ros". + + - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) + convention. 
+ - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. + - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. + + """ + + class_type: type = Camera + + offset: OffsetCfg = OffsetCfg() + """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. + + Note: + The parent frame is the frame the sensor attaches to. For example, the parent frame of a + camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. + """ + + spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING + """Spawn configuration for the asset. + + If None, then the prim is not spawned by the asset. Instead, it is assumed that the + asset is already present in the scene. + """ + + depth_clipping_behavior: Literal["max", "zero", "none"] = "none" + """Clipping behavior for the camera for values exceed the maximum value. Defaults to "none". + + - ``"max"``: Values are clipped to the maximum value. + - ``"zero"``: Values are clipped to zero. + - ``"none``: No clipping is applied. Values will be returned as ``inf``. + """ + + data_types: list[str] = ["rgb"] + """List of sensor names/types to enable for the camera. Defaults to ["rgb"]. + + Please refer to the :class:`Camera` class for a list of available data types. + """ + + width: int = MISSING + """Width of the image in pixels.""" + + height: int = MISSING + """Height of the image in pixels.""" + + update_latest_camera_pose: bool = False + """Whether to update the latest camera pose when fetching the camera's data. Defaults to False. + + If True, the latest camera pose is updated in the camera's data which will slow down performance + due to the use of :class:`XformPrimView`. + If False, the pose of the camera during initialization is returned. + """ + + semantic_filter: str | list[str] = "*:*" + """A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``. + + If a string, it should be a disjunctive normal form of (semantic type, labels). For examples: + + * ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``: + All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC". + Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE". + * ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA" + + If a list of strings, each string should be a semantic type. The segmentation for prims with + semantics of the specified types will be retrieved. For example, if the list is ["class"], only + the segmentation for prims with semantics of type "class" will be retrieved. + + .. seealso:: + + For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. + + .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering + """ + + colorize_semantic_segmentation: bool = True + """Whether to colorize the semantic segmentation images. Defaults to True. + + If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + colorize_instance_id_segmentation: bool = True + """Whether to colorize the instance ID segmentation images. Defaults to True. 
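A minimal configuration sketch using the fields documented in this class; the prim path, resolution, spawn clipping range, and offset are illustrative values under the assumption that the asset is spawned with a pinhole camera:

.. code-block:: python

    from isaaclab.sim import PinholeCameraCfg

    camera_cfg = CameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
        width=640,
        height=480,
        data_types=["rgb", "distance_to_image_plane"],
        depth_clipping_behavior="max",
        spawn=PinholeCameraCfg(clipping_range=(0.1, 20.0)),
        offset=CameraCfg.OffsetCfg(pos=(0.2, 0.0, 0.1), convention="ros"),
    )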
+ + If True, instance id segmentation is converted to an image where instance IDs are mapped to colors. + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + colorize_instance_segmentation: bool = True + """Whether to colorize the instance ID segmentation images. Defaults to True. + + If True, instance segmentation is converted to an image where instance IDs are mapped to colors. + and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. + """ + + semantic_segmentation_mapping: dict = {} + """Dictionary mapping semantics to specific colours + + Eg. + + .. code-block:: python + + { + "class:cube_1": (255, 36, 66, 255), + "class:cube_2": (255, 184, 48, 255), + "class:cube_3": (55, 255, 139, 255), + "class:table": (255, 237, 218, 255), + "class:ground": (100, 100, 100, 255), + "class:robot": (61, 178, 255, 255), + } + + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py new file mode 100644 index 00000000000..ec3288b04e9 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py @@ -0,0 +1,92 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import dataclass +from typing import Any + +import torch + +from isaaclab.utils.math import convert_camera_frame_orientation_convention + + +@dataclass +class CameraData: + """Data container for the camera sensor.""" + + ## + # Frame state. + ## + + pos_w: torch.Tensor = None + """Position of the sensor origin in world frame, following ROS convention. + + Shape is (N, 3) where N is the number of sensors. + """ + + quat_w_world: torch.Tensor = None + """Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame + + .. note:: + World frame convention follows the camera aligned with forward axis +X and up axis +Z. + + Shape is (N, 4) where N is the number of sensors. + """ + + ## + # Camera data + ## + + image_shape: tuple[int, int] = None + """A tuple containing (height, width) of the camera sensor.""" + + intrinsic_matrices: torch.Tensor = None + """The intrinsic matrices for the camera. + + Shape is (N, 3, 3) where N is the number of sensors. + """ + + output: dict[str, torch.Tensor] = None + """The retrieved sensor data with sensor types as key. + + The format of the data is available in the `Replicator Documentation`_. For semantic-based data, + this corresponds to the ``"data"`` key in the output of the sensor. + + .. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output + """ + + info: list[dict[str, Any]] = None + """The retrieved sensor info with sensor types as key. + + This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths. + For semantic-based data, this corresponds to the ``"info"`` key in the output of the sensor. For other sensor + types, the info is empty. + """ + + ## + # Additional Frame orientation conventions + ## + + @property + def quat_w_ros(self) -> torch.Tensor: + """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention. + + .. 
note:: + ROS convention follows the camera aligned with forward axis +Z and up axis -Y. + + Shape is (N, 4) where N is the number of sensors. + """ + return convert_camera_frame_orientation_convention(self.quat_w_world, origin="world", target="ros") + + @property + def quat_w_opengl(self) -> torch.Tensor: + """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following + Opengl / USD Camera convention. + + .. note:: + OpenGL convention follows the camera aligned with forward axis -Z and up axis +Y. + + Shape is (N, 4) where N is the number of sensors. + """ + return convert_camera_frame_orientation_convention(self.quat_w_world, origin="world", target="opengl") diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py new file mode 100644 index 00000000000..4b3676158e7 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py @@ -0,0 +1,425 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import json +import math +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import numpy as np +import torch +import warp as wp + +import carb +from pxr import UsdGeom + +from isaaclab.sim.views import XformPrimView +from isaaclab.utils.warp.kernels import reshape_tiled_image + +from ..sensor_base import SensorBase +from .camera import Camera + +if TYPE_CHECKING: + from .tiled_camera_cfg import TiledCameraCfg + + +class TiledCamera(Camera): + r"""The tiled rendering based camera sensor for acquiring the same data as the Camera class. + + This class inherits from the :class:`Camera` class but uses the tiled-rendering API to acquire + the visual data. Tiled-rendering concatenates the rendered images from multiple cameras into a single image. + This allows for rendering multiple cameras in parallel and is useful for rendering large scenes with multiple + cameras efficiently. + + The following sensor types are supported: + + - ``"rgb"``: A 3-channel rendered color image. + - ``"rgba"``: A 4-channel rendered color image with alpha channel. + - ``"distance_to_camera"``: An image containing the distance to camera optical center. + - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. + - ``"depth"``: Alias for ``"distance_to_image_plane"``. + - ``"normals"``: An image containing the local surface normal vectors at each pixel. + - ``"motion_vectors"``: An image containing the motion vector data at each pixel. + - ``"semantic_segmentation"``: The semantic segmentation data. + - ``"instance_segmentation_fast"``: The instance segmentation data. + - ``"instance_id_segmentation_fast"``: The instance id segmentation data. + + .. note:: + Currently the following sensor types are not supported in a "view" format: + + - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. + - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. + - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). + - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). 
+ - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). + - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). + - ``"bounding_box_3d"``: The 3D view space bounding box data. + - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. + + .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output + .. _USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html + + .. versionadded:: v1.0.0 + + This feature is available starting from Isaac Sim 4.2. Before this version, the tiled rendering APIs + were not available. + + """ + + cfg: TiledCameraCfg + """The configuration parameters.""" + + def __init__(self, cfg: TiledCameraCfg): + """Initializes the tiled camera sensor. + + Args: + cfg: The configuration parameters. + + Raises: + RuntimeError: If no camera prim is found at the given path. + ValueError: If the provided data types are not supported by the camera. + """ + super().__init__(cfg) + + def __del__(self): + """Unsubscribes from callbacks and detach from the replicator registry.""" + # unsubscribe from callbacks + SensorBase.__del__(self) + # detach from the replicator registry + for annotator in self._annotators.values(): + annotator.detach(self.render_product_paths) + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + # message for class + return ( + f"Tiled Camera @ '{self.cfg.prim_path}': \n" + f"\tdata types : {list(self.data.output.keys())} \n" + f"\tsemantic filter : {self.cfg.semantic_filter}\n" + f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" + f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" + f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" + f"\tupdate period (s): {self.cfg.update_period}\n" + f"\tshape : {self.image_shape}\n" + f"\tnumber of sensors : {self._view.count}" + ) + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + if not self._is_initialized: + raise RuntimeError( + "TiledCamera could not be initialized. Please ensure --enable_cameras is used to enable rendering." + ) + # reset the timestamps + SensorBase.reset(self, env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset the frame count + self._frame[env_ids] = 0 + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + This function creates handles and registers the provided data types with the replicator registry to + be able to access the data from the sensor. It also initializes the internal buffers to store the data. + + Raises: + RuntimeError: If the number of camera prims in the view does not match the number of environments. + RuntimeError: If replicator was not found. + """ + carb_settings_iface = carb.settings.get_settings() + if not carb_settings_iface.get("/isaaclab/cameras_enabled"): + raise RuntimeError( + "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" + " rendering." 
+ ) + + import omni.replicator.core as rep + + # Initialize parent class + SensorBase._initialize_impl(self) + # Create a view for the sensor + self._view = XformPrimView(self.cfg.prim_path, device=self._device, stage=self.stage) + # Check that sizes are correct + if self._view.count != self._num_envs: + raise RuntimeError( + f"Number of camera prims in the view ({self._view.count}) does not match" + f" the number of environments ({self._num_envs})." + ) + + # Create all env_ids buffer + self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) + # Create frame count buffer + self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) + + # Convert all encapsulated prims to Camera + cam_prim_paths = [] + for cam_prim in self._view.prims: + # Get camera prim + cam_prim_path = cam_prim.GetPath().pathString + # Check if prim is a camera + if not cam_prim.IsA(UsdGeom.Camera): + raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") + # Add to list + self._sensor_prims.append(UsdGeom.Camera(cam_prim)) + cam_prim_paths.append(cam_prim_path) + + # Create replicator tiled render product + rp = rep.create.render_product_tiled(cameras=cam_prim_paths, tile_resolution=(self.cfg.width, self.cfg.height)) + self._render_product_paths = [rp.path] + + # Define the annotators based on requested data types + self._annotators = dict() + for annotator_type in self.cfg.data_types: + if annotator_type == "rgba" or annotator_type == "rgb": + annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=self.device, do_array_copy=False) + self._annotators["rgba"] = annotator + elif annotator_type == "depth" or annotator_type == "distance_to_image_plane": + # keep depth for backwards compatibility + annotator = rep.AnnotatorRegistry.get_annotator( + "distance_to_image_plane", device=self.device, do_array_copy=False + ) + self._annotators[annotator_type] = annotator + # note: we are verbose here to make it easier to understand the code. + # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. + # if colorize is false, the data is returned as a uint32 image with ids as values. 
+ else: + init_params = None + if annotator_type == "semantic_segmentation": + init_params = { + "colorize": self.cfg.colorize_semantic_segmentation, + "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), + } + elif annotator_type == "instance_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_segmentation} + elif annotator_type == "instance_id_segmentation_fast": + init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} + + annotator = rep.AnnotatorRegistry.get_annotator( + annotator_type, init_params, device=self.device, do_array_copy=False + ) + self._annotators[annotator_type] = annotator + + # Attach the annotator to the render product + for annotator in self._annotators.values(): + annotator.attach(self._render_product_paths) + + # Create internal buffers + self._create_buffers() + + def _update_buffers_impl(self, env_ids: Sequence[int]): + # Increment frame count + self._frame[env_ids] += 1 + + # update latest camera pose + if self.cfg.update_latest_camera_pose: + self._update_poses(env_ids) + + # Extract the flattened image buffer + for data_type, annotator in self._annotators.items(): + # check whether returned data is a dict (used for segmentation) + output = annotator.get_data() + if isinstance(output, dict): + tiled_data_buffer = output["data"] + self._data.info[data_type] = output["info"] + else: + tiled_data_buffer = output + + # convert data buffer to warp array + if isinstance(tiled_data_buffer, np.ndarray): + # Let warp infer the dtype from numpy array instead of hardcoding uint8 + # Different annotators return different dtypes: RGB(uint8), depth(float32), segmentation(uint32) + tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device) + else: + tiled_data_buffer = tiled_data_buffer.to(device=self.device) + + # process data for different segmentation types + # Note: Replicator returns raw buffers of dtype uint32 for segmentation types + # so we need to convert them to uint8 4 channel images for colorized types + if ( + (data_type == "semantic_segmentation" and self.cfg.colorize_semantic_segmentation) + or (data_type == "instance_segmentation_fast" and self.cfg.colorize_instance_segmentation) + or (data_type == "instance_id_segmentation_fast" and self.cfg.colorize_instance_id_segmentation) + ): + tiled_data_buffer = wp.array( + ptr=tiled_data_buffer.ptr, shape=(*tiled_data_buffer.shape, 4), dtype=wp.uint8, device=self.device + ) + + # For motion vectors, we only require the first two channels of the tiled buffer + # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/2003) + if data_type == "motion_vectors": + tiled_data_buffer = tiled_data_buffer[:, :, :2].contiguous() + + # For normals, we only require the first three channels of the tiled buffer + # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/4239) + if data_type == "normals": + tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() + + wp.launch( + kernel=reshape_tiled_image, + dim=(self._view.count, self.cfg.height, self.cfg.width), + inputs=[ + tiled_data_buffer.flatten(), + wp.from_torch(self._data.output[data_type]), # zero-copy alias + *list(self._data.output[data_type].shape[1:]), # height, width, num_channels + self._tiling_grid_shape()[0], # num_tiles_x + ], + device=self.device, + ) + + # alias rgb as first 3 channels of rgba + if data_type == "rgba" and "rgb" in self.cfg.data_types: + self._data.output["rgb"] = 
self._data.output["rgba"][..., :3] + + # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, + # the replicator depth clipping is applied w.r.t. to the image plane which may result in values + # larger than the clipping range in the output. We apply an additional clipping to ensure values + # are within the clipping range for all the annotators. + if data_type == "distance_to_camera": + self._data.output[data_type][self._data.output[data_type] > self.cfg.spawn.clipping_range[1]] = ( + torch.inf + ) + # apply defined clipping behavior + if ( + data_type == "distance_to_camera" or data_type == "distance_to_image_plane" or data_type == "depth" + ) and self.cfg.depth_clipping_behavior != "none": + self._data.output[data_type][torch.isinf(self._data.output[data_type])] = ( + 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] + ) + + """ + Private Helpers + """ + + def _check_supported_data_types(self, cfg: TiledCameraCfg): + """Checks if the data types are supported by the ray-caster camera.""" + # check if there is any intersection in unsupported types + # reason: these use np structured data types which we can't yet convert to torch tensor + common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES + if common_elements: + # provide alternative fast counterparts + fast_common_elements = [] + for item in common_elements: + if "instance_segmentation" in item or "instance_id_segmentation" in item: + fast_common_elements.append(item + "_fast") + # raise error + raise ValueError( + f"TiledCamera class does not support the following sensor types: {common_elements}." + "\n\tThis is because these sensor types output numpy structured data types which" + "can't be converted to torch tensors easily." + "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." 
+ f"\n\t\tFast counterparts: {fast_common_elements}" + ) + + def _create_buffers(self): + """Create buffers for storing data.""" + # create the data object + # -- pose of the cameras + self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) + self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) + self._update_poses(self._ALL_INDICES) + # -- intrinsic matrix + self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) + self._update_intrinsic_matrices(self._ALL_INDICES) + self._data.image_shape = self.image_shape + # -- output data + data_dict = dict() + if "rgba" in self.cfg.data_types or "rgb" in self.cfg.data_types: + data_dict["rgba"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 + ).contiguous() + if "rgb" in self.cfg.data_types: + # RGB is the first 3 channels of RGBA + data_dict["rgb"] = data_dict["rgba"][..., :3] + if "distance_to_image_plane" in self.cfg.data_types: + data_dict["distance_to_image_plane"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 + ).contiguous() + if "depth" in self.cfg.data_types: + data_dict["depth"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 + ).contiguous() + if "distance_to_camera" in self.cfg.data_types: + data_dict["distance_to_camera"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 + ).contiguous() + if "normals" in self.cfg.data_types: + data_dict["normals"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.float32 + ).contiguous() + if "motion_vectors" in self.cfg.data_types: + data_dict["motion_vectors"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 2), device=self.device, dtype=torch.float32 + ).contiguous() + if "semantic_segmentation" in self.cfg.data_types: + if self.cfg.colorize_semantic_segmentation: + data_dict["semantic_segmentation"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 + ).contiguous() + else: + data_dict["semantic_segmentation"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 + ).contiguous() + if "instance_segmentation_fast" in self.cfg.data_types: + if self.cfg.colorize_instance_segmentation: + data_dict["instance_segmentation_fast"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 + ).contiguous() + else: + data_dict["instance_segmentation_fast"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 + ).contiguous() + if "instance_id_segmentation_fast" in self.cfg.data_types: + if self.cfg.colorize_instance_id_segmentation: + data_dict["instance_id_segmentation_fast"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 + ).contiguous() + else: + data_dict["instance_id_segmentation_fast"] = torch.zeros( + (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 + ).contiguous() + + self._data.output = data_dict + self._data.info = dict() + + def _tiled_image_shape(self) -> tuple[int, int]: + """Returns a tuple containing the dimension of the tiled image.""" + cols, rows = 
self._tiling_grid_shape() + return (self.cfg.width * cols, self.cfg.height * rows) + + def _tiling_grid_shape(self) -> tuple[int, int]: + """Returns a tuple containing the tiling grid dimension.""" + cols = math.ceil(math.sqrt(self._view.count)) + rows = math.ceil(self._view.count / cols) + return (cols, rows) + + def _create_annotator_data(self): + # we do not need to create annotator data for the tiled camera sensor + raise RuntimeError("This function should not be called for the tiled camera sensor.") + + def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: + # we do not need to process annotator output for the tiled camera sensor + raise RuntimeError("This function should not be called for the tiled camera sensor.") + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py new file mode 100644 index 00000000000..2241a0648fd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.utils import configclass + +from .camera_cfg import CameraCfg +from .tiled_camera import TiledCamera + + +@configclass +class TiledCameraCfg(CameraCfg): + """Configuration for a tiled rendering-based camera sensor.""" + + class_type: type = TiledCamera diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py new file mode 100644 index 00000000000..f9db81551b4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py @@ -0,0 +1,272 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Helper functions to project between pointcloud and depth images.""" + +# needed to import for allowing type-hinting: torch.device | str | None +from __future__ import annotations + +from collections.abc import Sequence + +import numpy as np +import torch +import warp as wp + +import isaaclab.utils.math as math_utils +from isaaclab.utils.array import TensorData, convert_to_torch + +""" +Depth <-> Pointcloud conversions. +""" + + +def transform_points( + points: TensorData, + position: Sequence[float] | None = None, + orientation: Sequence[float] | None = None, + device: torch.device | str | None = None, +) -> np.ndarray | torch.Tensor: + r"""Transform input points in a given frame to a target frame. + + This function transform points from a source frame to a target frame. The transformation is defined by the + position ``t`` and orientation ``R`` of the target frame in the source frame. + + .. math:: + p_{target} = R_{target} \times p_{source} + t_{target} + + If either the inputs `position` and `orientation` are None, the corresponding transformation is not applied. + + Args: + points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in source frame. 
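The tiling grid computed by ``_tiling_grid_shape`` above resolves, for example, as follows for an illustrative view of 12 cameras at 640x480:

.. code-block:: python

    import math

    # illustrative view of 12 cameras rendered into one tiled image
    count, width, height = 12, 640, 480
    cols = math.ceil(math.sqrt(count))                  # 4
    rows = math.ceil(count / cols)                      # 3
    tiled_image_shape = (width * cols, height * rows)   # (2560, 1440)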
+ position: The position of source frame in target frame. Defaults to None. + orientation: The orientation (w, x, y, z) of source frame in target frame. + Defaults to None. + device: The device for torch where the computation + should be executed. Defaults to None, i.e. takes the device that matches the depth image. + + Returns: + A tensor of shape (N, 3) comprising of 3D points in target frame. + If the input is a numpy array, the output is a numpy array. Otherwise, it is a torch tensor. + """ + # check if numpy + is_numpy = isinstance(points, np.ndarray) + # decide device + if device is None and is_numpy: + device = torch.device("cpu") + # convert to torch + points = convert_to_torch(points, dtype=torch.float32, device=device) + # update the device with the device of the depth image + # note: this is needed since warp does not provide the device directly + device = points.device + # apply rotation + if orientation is not None: + orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) + # apply translation + if position is not None: + position = convert_to_torch(position, dtype=torch.float32, device=device) + # apply transformation + points = math_utils.transform_points(points, position, orientation) + + # return everything according to input type + if is_numpy: + return points.detach().cpu().numpy() + else: + return points + + +def create_pointcloud_from_depth( + intrinsic_matrix: np.ndarray | torch.Tensor | wp.array, + depth: np.ndarray | torch.Tensor | wp.array, + keep_invalid: bool = False, + position: Sequence[float] | None = None, + orientation: Sequence[float] | None = None, + device: torch.device | str | None = None, +) -> np.ndarray | torch.Tensor: + r"""Creates pointcloud from input depth image and camera intrinsic matrix. + + This function creates a pointcloud from a depth image and camera intrinsic matrix. The pointcloud is + computed using the following equation: + + .. math:: + p_{camera} = K^{-1} \times [u, v, 1]^T \times d + + where :math:`K` is the camera intrinsic matrix, :math:`u` and :math:`v` are the pixel coordinates and + :math:`d` is the depth value at the pixel. + + Additionally, the pointcloud can be transformed from the camera frame to a target frame by providing + the position ``t`` and orientation ``R`` of the camera in the target frame: + + .. math:: + p_{target} = R_{target} \times p_{camera} + t_{target} + + Args: + intrinsic_matrix: A (3, 3) array providing camera's calibration matrix. + depth: An array of shape (H, W) with values encoding the depth measurement. + keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points + correspond to pixels with depth values 0.0 or NaN. Defaults to False. + position: The position of the camera in a target frame. Defaults to None. + orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None. + device: The device for torch where the computation should be executed. + Defaults to None, i.e. takes the device that matches the depth image. + + Returns: + An array/tensor of shape (N, 3) comprising of 3D coordinates of points. + The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray + is returned. + """ + # We use PyTorch here for matrix multiplication since it is compiled with Intel MKL while numpy + # by default uses OpenBLAS. With PyTorch (CPU), we could process a depth image of size (480, 640) + # in 0.0051 secs, while with numpy it took 0.0292 secs. 
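A small usage sketch, assuming this module's ``transform_points`` is in scope; the points, the translation, and the 90-degree yaw quaternion are illustrative:

.. code-block:: python

    import math

    import torch

    # three illustrative points; rotate 90 degrees about +Z, then translate by (1, 0, 0)
    points = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    position = (1.0, 0.0, 0.0)
    orientation = (math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4))  # (w, x, y, z)

    points_target = transform_points(points, position=position, orientation=orientation)
    # the first point rotates to (0, 1, 0) and shifts to roughly (1, 1, 0)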
+ + # convert to numpy matrix + is_numpy = isinstance(depth, np.ndarray) + # decide device + if device is None and is_numpy: + device = torch.device("cpu") + # convert depth to torch tensor + depth = convert_to_torch(depth, dtype=torch.float32, device=device) + # update the device with the device of the depth image + # note: this is needed since warp does not provide the device directly + device = depth.device + # convert inputs to torch tensors + intrinsic_matrix = convert_to_torch(intrinsic_matrix, dtype=torch.float32, device=device) + if position is not None: + position = convert_to_torch(position, dtype=torch.float32, device=device) + if orientation is not None: + orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) + # compute pointcloud + depth_cloud = math_utils.unproject_depth(depth, intrinsic_matrix) + # convert 3D points to world frame + depth_cloud = math_utils.transform_points(depth_cloud, position, orientation) + + # keep only valid entries if flag is set + if not keep_invalid: + pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(depth_cloud), ~torch.isinf(depth_cloud)), dim=1) + depth_cloud = depth_cloud[pts_idx_to_keep, ...] + + # return everything according to input type + if is_numpy: + return depth_cloud.detach().cpu().numpy() + else: + return depth_cloud + + +def create_pointcloud_from_rgbd( + intrinsic_matrix: torch.Tensor | np.ndarray | wp.array, + depth: torch.Tensor | np.ndarray | wp.array, + rgb: torch.Tensor | wp.array | np.ndarray | tuple[float, float, float] = None, + normalize_rgb: bool = False, + position: Sequence[float] | None = None, + orientation: Sequence[float] | None = None, + device: torch.device | str | None = None, + num_channels: int = 3, +) -> tuple[torch.Tensor, torch.Tensor] | tuple[np.ndarray, np.ndarray]: + """Creates pointcloud from input depth image and camera transformation matrix. + + This function provides the same functionality as :meth:`create_pointcloud_from_depth` but also allows + to provide the RGB values for each point. + + The ``rgb`` attribute is used to resolve the corresponding point's color: + + - If a ``np.array``/``wp.array``/``torch.tensor`` of shape (H, W, 3), then the corresponding channels + encode the RGB values. + - If a tuple, then the point cloud has a single color specified by the values (r, g, b). + - If None, then default color is white, i.e. (0, 0, 0). + + If the input ``normalize_rgb`` is set to :obj:`True`, then the RGB values are normalized to be in the range [0, 1]. + + Args: + intrinsic_matrix: A (3, 3) array/tensor providing camera's calibration matrix. + depth: An array/tensor of shape (H, W) with values encoding the depth measurement. + rgb: Color for generated point cloud. Defaults to None. + normalize_rgb: Whether to normalize input rgb. Defaults to False. + position: The position of the camera in a target frame. Defaults to None. + orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None. + device: The device for torch where the computation should be executed. Defaults to None, in which case + it takes the device that matches the depth image. + num_channels: Number of channels in RGB pointcloud. Defaults to 3. + + Returns: + A tuple of (N, 3) arrays or tensors containing the 3D coordinates of points and their RGB color respectively. + The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray + is returned. 
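A minimal usage sketch for the unprojection above, with an illustrative intrinsic matrix and a tiny 2x2 depth image in which every pixel is valid:

.. code-block:: python

    import torch

    # illustrative intrinsics (principal point at the center of a 2x2 image)
    intrinsic_matrix = torch.tensor([[554.0, 0.0, 1.0], [0.0, 554.0, 1.0], [0.0, 0.0, 1.0]])
    depth = torch.tensor([[2.0, 2.0], [3.0, 4.0]])

    points = create_pointcloud_from_depth(intrinsic_matrix, depth)
    # points is an (N, 3) tensor with one entry per valid pixel (here N = 4)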
+ + Raises: + ValueError: When rgb image is a numpy array but not of shape (H, W, 3) or (H, W, 4). + """ + # check valid inputs + if rgb is not None and not isinstance(rgb, tuple): + if len(rgb.shape) == 3: + if rgb.shape[2] not in [3, 4]: + raise ValueError(f"Input rgb image of invalid shape: {rgb.shape} != (H, W, 3) or (H, W, 4).") + else: + raise ValueError(f"Input rgb image not three-dimensional. Received shape: {rgb.shape}.") + if num_channels not in [3, 4]: + raise ValueError(f"Invalid number of channels: {num_channels} != 3 or 4.") + + # check if input depth is numpy array + is_numpy = isinstance(depth, np.ndarray) + # decide device + if device is None and is_numpy: + device = torch.device("cpu") + # convert depth to torch tensor + if is_numpy: + depth = torch.from_numpy(depth).to(device=device) + # retrieve XYZ pointcloud + points_xyz = create_pointcloud_from_depth(intrinsic_matrix, depth, True, position, orientation, device=device) + + # get image height and width + im_height, im_width = depth.shape[:2] + # total number of points + num_points = im_height * im_width + # extract color value + if rgb is not None: + if isinstance(rgb, (np.ndarray, torch.Tensor, wp.array)): + # copy numpy array to preserve + rgb = convert_to_torch(rgb, device=device, dtype=torch.float32) + rgb = rgb[:, :, :3] + # convert the matrix to (W, H, 3) from (H, W, 3) since depth processing + # is done in the order (u, v) where u: (0, W-1) and v: (0 - H-1) + points_rgb = rgb.permute(1, 0, 2).reshape(-1, 3) + elif isinstance(rgb, (tuple, list)): + # same color for all points + points_rgb = torch.Tensor((rgb,) * num_points, device=device, dtype=torch.uint8) + else: + # default color is white + points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) + else: + points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) + # normalize color values + if normalize_rgb: + points_rgb = points_rgb.float() / 255 + + # remove invalid points + pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(points_xyz), ~torch.isinf(points_xyz)), dim=1) + points_rgb = points_rgb[pts_idx_to_keep, ...] + points_xyz = points_xyz[pts_idx_to_keep, ...] + + # add additional channels if required + if num_channels == 4: + points_rgb = torch.nn.functional.pad(points_rgb, (0, 1), mode="constant", value=1.0) + + # return everything according to input type + if is_numpy: + return points_xyz.cpu().numpy(), points_rgb.cpu().numpy() + else: + return points_xyz, points_rgb + + +def save_images_to_file(images: torch.Tensor, file_path: str): + """Save images to file. + + Args: + images: A tensor of shape (N, H, W, C) containing the images. + file_path: The path to save the images to. + """ + from torchvision.utils import make_grid, save_image + + save_image( + make_grid(torch.swapaxes(images.unsqueeze(1), 1, -1).squeeze(-1), nrow=round(images.shape[0] ** 0.5)), file_path + ) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py new file mode 100644 index 00000000000..94b402d41a3 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for rigid contact sensor.""" + +from .contact_sensor import ContactSensor +from .contact_sensor_cfg import ContactSensorCfg +from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py new file mode 100644 index 00000000000..2a85a2661f6 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -0,0 +1,539 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import carb +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import PhysxSchema + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.math import convert_quat + +from ..sensor_base import SensorBase +from .contact_sensor_data import ContactSensorData + +if TYPE_CHECKING: + from .contact_sensor_cfg import ContactSensorCfg + + +class ContactSensor(SensorBase): + """A contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + Please check the documentation on `RigidContact`_ for more details. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + + .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html + .. 
_RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + # Enable contact processing + carb_settings_iface = carb.settings.get_settings() + carb_settings_iface.set_bool("/physics/disableContactProcessing", False) + + # Create empty variables for storing output data + self._data: ContactSensorData = ContactSensorData() + # initialize self._body_physx_view for running in extension mode + self._body_physx_view = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self.body_physx_view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of bodies : {self.num_bodies}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self.body_physx_view.count + + @property + def data(self) -> ContactSensorData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Number of bodies with contact sensors attached.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def body_physx_view(self) -> physx.RigidBodyView: + """View for the rigid bodies captured (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._body_physx_view + + @property + def contact_physx_view(self) -> physx.RigidContactView: + """Contact reporter view for the bodies (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._contact_physx_view + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.net_forces_w[env_ids] = 0.0 + self._data.net_forces_w_history[env_ids] = 0.0 + # reset force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + self._data.force_matrix_w[env_ids] = 0.0 + self._data.force_matrix_w_history[env_ids] = 0.0 + # reset the current air time + if self.cfg.track_air_time: + self._data.current_air_time[env_ids] = 0.0 + self._data.last_air_time[env_ids] = 0.0 + self._data.current_contact_time[env_ids] = 0.0 + self._data.last_contact_time[env_ids] = 0.0 + # reset contact positions + if self.cfg.track_contact_points: + self._data.contact_pos_w[env_ids, :] = torch.nan + # reset friction forces + if self.cfg.track_friction_forces: + self._data.friction_forces_w[env_ids, :] = 0.0 + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. 
+            preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False.
+
+        Returns:
+            A tuple of lists containing the body indices and names.
+        """
+        return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order)
+
+    def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
+        """Checks which bodies have established contact within the last :attr:`dt` seconds.
+
+        This function checks if the bodies have established contact within the last :attr:`dt` seconds
+        by comparing the current contact time with the given time period. If the contact time is less
+        than the given time period, then the bodies are considered to be in contact.
+
+        Note:
+            The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other
+            words, :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true
+            if the sensor is updated by the physics or the environment stepping time-step and the sensor
+            is read by the environment stepping time-step.
+
+        Args:
+            dt: The time period since the contact was established.
+            abs_tol: The absolute tolerance for the comparison.
+
+        Returns:
+            A boolean tensor indicating the bodies that have established contact within the last
+            :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the
+            number of bodies in each sensor.
+
+        Raises:
+            RuntimeError: If the sensor is not configured to track contact time.
+        """
+        # check if the sensor is configured to track contact time
+        if not self.cfg.track_air_time:
+            raise RuntimeError(
+                "The contact sensor is not configured to track contact time."
+                " Please enable 'track_air_time' in the sensor configuration."
+            )
+        # check if the bodies are in contact
+        currently_in_contact = self.data.current_contact_time > 0.0
+        less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol)
+        return currently_in_contact * less_than_dt_in_contact
+
+    def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
+        """Checks which bodies have broken contact within the last :attr:`dt` seconds.
+
+        This function checks if the bodies have broken contact within the last :attr:`dt` seconds
+        by comparing the current air time with the given time period. If the air time is less
+        than the given time period, then the bodies are considered to not be in contact.
+
+        Note:
+            It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words,
+            :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if
+            the sensor is updated by the physics or the environment stepping time-step and the sensor
+            is read by the environment stepping time-step.
+
+        Args:
+            dt: The time period since the contact was broken.
+            abs_tol: The absolute tolerance for the comparison.
+
+        Returns:
+            A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds.
+            Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
+
+        Raises:
+            RuntimeError: If the sensor is not configured to track contact time.
+        """
+        # check if the sensor is configured to track contact time
+        if not self.cfg.track_air_time:
+            raise RuntimeError(
+                "The contact sensor is not configured to track contact time."
+                " Please enable 'track_air_time' in the sensor configuration."
+ ) + # check if the sensor is configured to track contact time + currently_detached = self.data.current_air_time > 0.0 + less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) + return currently_detached * less_than_dt_detached + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check that only rigid bodies are selected + leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] + template_prim_path = self._parent_prims[0].GetPath().pathString + body_names = list() + for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): + # check if prim has contact reporter API + if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): + prim_path = prim.GetPath().pathString + body_names.append(prim_path.rsplit("/", 1)[-1]) + # check that there is at least one body with contact reporter API + if not body_names: + raise RuntimeError( + f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." + "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." + ) + + # construct regex expression for the body names + body_names_regex = r"(" + "|".join(body_names) + r")" + body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + # convert regex expressions to glob expressions for PhysX + body_names_glob = body_names_regex.replace(".*", "*") + filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] + + # create a rigid prim view for the sensor + self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) + self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( + body_names_glob, + filter_patterns=filter_prim_paths_glob, + max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, + ) + # resolve the true count of bodies + self._num_bodies = self.body_physx_view.count // self._num_envs + # check that contact reporter succeeded + if self._num_bodies != len(body_names): + raise RuntimeError( + "Failed to initialize contact reporter for specified bodies." + f"\n\tInput prim path : {self.cfg.prim_path}" + f"\n\tResolved prim paths: {body_names_regex}" + ) + + # prepare data buffers + self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) + # optional buffers + # -- history of net forces + if self.cfg.history_length > 0: + self._data.net_forces_w_history = torch.zeros( + self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device + ) + else: + self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) + # -- pose of sensor origins + if self.cfg.track_pose: + self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) + self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) + + # check if filter paths are valid + if self.cfg.track_contact_points or self.cfg.track_friction_forces: + if len(self.cfg.filter_prim_paths_expr) == 0: + raise ValueError( + "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + if self.cfg.max_contact_data_count_per_prim < 1: + raise ValueError( + f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. 
" + "Please set it to a value greater than 0 to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + + # -- position of contact points + if self.cfg.track_contact_points: + self._data.contact_pos_w = torch.full( + (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), + torch.nan, + device=self._device, + ) + # -- friction forces at contact points + if self.cfg.track_friction_forces: + self._data.friction_forces_w = torch.full( + (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), + 0.0, + device=self._device, + ) + # -- air/contact time between contacts + if self.cfg.track_air_time: + self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) + self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) + self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) + self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) + # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) + # force matrix history: (num_envs, history_length, num_bodies, num_filter_shapes, 3) + if len(self.cfg.filter_prim_paths_expr) != 0: + num_filters = self.contact_physx_view.filter_count + self._data.force_matrix_w = torch.zeros( + self._num_envs, self._num_bodies, num_filters, 3, device=self._device + ) + if self.cfg.history_length > 0: + self._data.force_matrix_w_history = torch.zeros( + self._num_envs, self.cfg.history_length, self._num_bodies, num_filters, 3, device=self._device + ) + else: + self._data.force_matrix_w_history = self._data.force_matrix_w.unsqueeze(1) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + + # obtain the contact forces + # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). + # This isn't the most efficient way to do this, but it's the easiest to implement. 
+ net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) + self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] + # update contact force history + if self.cfg.history_length > 0: + self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) + self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] + + # obtain the contact force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) + num_filters = self.contact_physx_view.filter_count + # acquire and shape the force matrix + force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) + force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) + self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] + if self.cfg.history_length > 0: + self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) + self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] + + # obtain the pose of the sensor origin + if self.cfg.track_pose: + pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] + pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") + self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) + + # obtain contact points + if self.cfg.track_contact_points: + _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( + self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) + ) + self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( + buffer_contact_points, buffer_count, buffer_start_indices + )[env_ids] + + # obtain friction forces + if self.cfg.track_friction_forces: + friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( + dt=self._sim_physics_dt + ) + self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( + friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 + )[env_ids] + + # obtain the air time + if self.cfg.track_air_time: + # -- time elapsed since last update + # since this function is called every frame, we can use the difference to get the elapsed time + elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] + # -- check contact state of bodies + is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold + is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact + is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact + # -- update the last contact time if body has just become in contact + self._data.last_air_time[env_ids] = torch.where( + is_first_contact, + self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_air_time[env_ids], + ) + # -- increment time for bodies that are not in contact + self._data.current_air_time[env_ids] = torch.where( + ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + # -- update the last contact time if body has just detached + self._data.last_contact_time[env_ids] = torch.where( + is_first_detached, + self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_contact_time[env_ids], + ) + # -- increment time for bodies that are in contact + self._data.current_contact_time[env_ids] = 
torch.where( + is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + + def _unpack_contact_buffer_data( + self, + contact_data: torch.Tensor, + buffer_count: torch.Tensor, + buffer_start_indices: torch.Tensor, + avg: bool = True, + default: float = float("nan"), + ) -> torch.Tensor: + """ + Unpacks and aggregates contact data for each (env, body, filter) group. + + This function vectorizes the following nested loop: + + for i in range(self._num_bodies * self._num_envs): + for j in range(self.contact_physx_view.filter_count): + start_index_ij = buffer_start_indices[i, j] + count_ij = buffer_count[i, j] + self._contact_position_aggregate_buffer[i, j, :] = torch.mean( + contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 + ) + + For more details, see the `RigidContactView.get_contact_data() documentation `_. + + Args: + contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). + buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). + buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). + avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. + default: Default value to use for groups with zero contacts. Defaults to NaN. + + Returns: + Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). + """ + counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) + n_rows, total = counts.numel(), int(counts.sum()) + agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) + if total > 0: + row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) + + block_starts = counts.cumsum(0) - counts + deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) + flat_idx = starts[row_ids] + deltas + + pts = contact_data.index_select(0, flat_idx) + agg = agg.zero_().index_add_(0, row_ids, pts) + agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg + agg[counts == 0] = default + + return agg.view(self._num_envs * self.num_bodies, -1, 3).view( + self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. 
not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "contact_visualizer"): + self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.contact_visualizer.set_visibility(True) + else: + if hasattr(self, "contact_visualizer"): + self.contact_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self.body_physx_view is None: + return + # marker indices + # 0: contact, 1: no contact + net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) + marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) + # check if prim is visualized + if self.cfg.track_pose: + frame_origins: torch.Tensor = self._data.pos_w + else: + pose = self.body_physx_view.get_transforms() + frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] + # visualize + self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._body_physx_view = None + self._contact_physx_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py new file mode 100644 index 00000000000..c811a7ca63d --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py @@ -0,0 +1,79 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.markers import VisualizationMarkersCfg +from isaaclab.markers.config import CONTACT_SENSOR_MARKER_CFG +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .contact_sensor import ContactSensor + + +@configclass +class ContactSensorCfg(SensorBaseCfg): + """Configuration for the contact sensor.""" + + class_type: type = ContactSensor + + track_pose: bool = False + """Whether to track the pose of the sensor's origin. Defaults to False.""" + + track_contact_points: bool = False + """Whether to track the contact point locations. Defaults to False.""" + + track_friction_forces: bool = False + """Whether to track the friction forces at the contact points. Defaults to False.""" + + max_contact_data_count_per_prim: int = 4 + """The maximum number of contacts across all batches of the sensor to keep track of. Default is 4. + + This parameter sets the total maximum counts of the simulation across all bodies and environments. The total number + of contacts allowed is max_contact_data_count_per_prim*num_envs*num_sensor_bodies. + + .. note:: + + If the environment is very contact rich it is suggested to increase this parameter to avoid out of bounds memory + errors and loss of contact data leading to inaccurate measurements. + + """ + + track_air_time: bool = False + """Whether to track the air/contact time of the bodies (time between contacts). 
Defaults to False.""" + + force_threshold: float = 1.0 + """The threshold on the norm of the contact force that determines whether two bodies are in collision or not. + + This value is only used for tracking the mode duration (the time in contact or in air), + if :attr:`track_air_time` is True. + """ + + filter_prim_paths_expr: list[str] = list() + """The list of primitive paths (or expressions) to filter contacts with. Defaults to an empty list, in which case + no filtering is applied. + + The contact sensor allows reporting contacts between the primitive specified with :attr:`prim_path` and + other primitives in the scene. For instance, in a scene containing a robot, a ground plane and an object, + you can obtain individual contact reports of the base of the robot with the ground plane and the object. + + .. note:: + The expression in the list can contain the environment namespace regex ``{ENV_REGEX_NS}`` which + will be replaced with the environment namespace. + + Example: ``{ENV_REGEX_NS}/Object`` will be replaced with ``/World/envs/env_.*/Object``. + + .. attention:: + The reporting of filtered contacts only works when the sensor primitive :attr:`prim_path` corresponds to a + single primitive in that environment. If the sensor primitive corresponds to multiple primitives, the + filtering will not work as expected. Please check :class:`~isaaclab.sensors.contact_sensor.ContactSensor` + for more details. + If track_contact_points is true, then filter_prim_paths_expr cannot be an empty list! + """ + + visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor") + """The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG. + + .. note:: + This attribute is only used when debug visualization is enabled. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py new file mode 100644 index 00000000000..fd6c15ebe96 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -0,0 +1,153 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# needed to import for allowing type-hinting: torch.Tensor | None +from __future__ import annotations + +from dataclasses import dataclass + +import torch + + +@dataclass +class ContactSensorData: + """Data container for the contact reporting sensor.""" + + pos_w: torch.Tensor | None = None + """Position of the sensor origin in world frame. + + Shape is (N, 3), where N is the number of sensors. + + Note: + If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. + + """ + + contact_pos_w: torch.Tensor | None = None + """Average of the positions of contact points between sensor body and filter prim in world frame. + + Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor + and M is the number of filtered bodies. + + Collision pairs not in contact will result in NaN. + + Note: + + * If the :attr:`ContactSensorCfg.track_contact_points` is False, then this quantity is None. + * If the :attr:`ContactSensorCfg.track_contact_points` is True, a ValueError will be raised if: + + * If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. 
+            * If the :attr:`ContactSensorCfg.max_contact_data_count_per_prim` is not specified or less than 1.
+
+    """
+
+    friction_forces_w: torch.Tensor | None = None
+    """Sum of the friction forces between sensor body and filter prim in world frame.
+
+    Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor
+    and M is the number of filtered bodies.
+
+    Collision pairs not in contact will result in zero.
+
+    Note:
+
+        * If the :attr:`ContactSensorCfg.track_friction_forces` is False, then this quantity is None.
+        * If the :attr:`ContactSensorCfg.track_friction_forces` is True, a ValueError will be raised if:
+
+            * The :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty.
+            * The :attr:`ContactSensorCfg.max_contact_data_count_per_prim` is not specified or less than 1.
+
+    """
+
+    quat_w: torch.Tensor | None = None
+    """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame.
+
+    Shape is (N, 4), where N is the number of sensors.
+
+    Note:
+        If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None.
+    """
+
+    net_forces_w: torch.Tensor | None = None
+    """The net normal contact forces in world frame.
+
+    Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor.
+
+    Note:
+        This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused
+        with the total contact forces acting on the sensor bodies (which also includes the tangential forces).
+    """
+
+    net_forces_w_history: torch.Tensor | None = None
+    """The net normal contact forces in world frame.
+
+    Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length
+    and B is the number of bodies in each sensor.
+
+    In the history dimension, the first index is the most recent and the last index is the oldest.
+
+    Note:
+        This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused
+        with the total contact forces acting on the sensor bodies (which also includes the tangential forces).
+    """
+
+    force_matrix_w: torch.Tensor | None = None
+    """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame.
+
+    Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor
+    and M is the number of filtered bodies.
+
+    Note:
+        If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None.
+    """
+
+    force_matrix_w_history: torch.Tensor | None = None
+    """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame.
+
+    Shape is (N, T, B, M, 3), where N is the number of sensors, T is the configured history length,
+    B is number of bodies in each sensor and M is the number of filtered bodies.
+
+    In the history dimension, the first index is the most recent and the last index is the oldest.
+
+    Note:
+        If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None.
+    """
+
+    last_air_time: torch.Tensor | None = None
+    """Time spent (in s) in the air before the last contact.
+
+    Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
+
+    Note:
+        If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None.
+    """
+
+    current_air_time: torch.Tensor | None = None
+    """Time spent (in s) in the air since the last detach.
+ + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + + last_contact_time: torch.Tensor | None = None + """Time spent (in s) in contact before the last detach. + + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ + + current_contact_time: torch.Tensor | None = None + """Time spent (in s) in contact since the last contact. + + Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. + + Note: + If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py new file mode 100644 index 00000000000..d5db853e8cc --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for frame transformer sensor.""" + +from .frame_transformer import FrameTransformer +from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg +from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 00000000000..ed83392b3aa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,560 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.string as string_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.utils.math import ( + combine_frame_transforms, + convert_quat, + is_identity_pose, + normalize, + quat_from_angle_axis, + subtract_frame_transforms, +) + +from ..sensor_base import SensorBase +from .frame_transformer_data import FrameTransformerData + +if TYPE_CHECKING: + from .frame_transformer_cfg import FrameTransformerCfg + +# import logger +logger = logging.getLogger(__name__) + + +class FrameTransformer(SensorBase): + """A sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. 
The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. + + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data: FrameTransformerData = FrameTransformerData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"FrameTransformer @ '{self.cfg.prim_path}': \n" + f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" + f"\tnumber of envs: {self._num_envs}\n" + f"\tsource body frame: {self._source_frame_body_name}\n" + f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> FrameTransformerData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. + + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`len(data.target_frame_names)` to access the number of target frames. + """ + return len(self._target_frame_body_names) + + @property + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`data.target_frame_names` to access the target frame names. + """ + return self._target_frame_body_names + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = ... + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) + + """ + Implementation. 
+ """ + + def _initialize_impl(self): + super()._initialize_impl() + + # resolve source frame offset + source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) + source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is + # not the identity quaternion for efficiency in _update_buffer_impl + self._apply_source_frame_offset = True + # Handle source frame offsets + if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): + logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") + self._apply_source_frame_offset = False + else: + logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") + # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) + self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) + self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) + + # Keep track of mapping from the rigid body name to the desired frames and prim path, + # as there may be multiple frames based upon the same body name and we don't want to + # create unnecessary views. + body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} + # The offsets associated with each target frame + target_offsets: dict[str, dict[str, torch.Tensor]] = {} + # The frames whose offsets are not identity + non_identity_offset_frames: list[str] = [] + + # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the + # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl + self._apply_target_frame_offset = False + + # Need to keep track of whether the source frame is also a target frame + self._source_is_also_target_frame = False + + # Collect all target frames, their associated body prim paths and their offsets so that we can extract + # the prim, check that it has the appropriate rigid body API in a single loop. + # First element is None because user can't specify source frame name + frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] + frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] + # First element is None because source frame offset is handled separately + frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] + frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) + for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): + # Find correct prim + matching_prims = sim_utils.find_matching_prims(prim_path) + if len(matching_prims) == 0: + raise ValueError( + f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." + " No matching prims were found." + ) + for prim in matching_prims: + # Get the prim path of the matching prim + matching_prim_path = prim.GetPath().pathString + # Check if it is a rigid prim + if not prim.HasAPI(UsdPhysics.RigidBodyAPI): + raise ValueError( + f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" + " rigid body. The class only supports transformations between rigid bodies." 
+ ) + + # Get the name of the body: use relative prim path for unique identification + body_name = self._get_relative_body_path(matching_prim_path) + # Use leaf name of prim path if frame name isn't specified by user + frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] + + # Keep track of which frames are associated with which bodies + if body_name in body_names_to_frames: + body_names_to_frames[body_name]["frames"].add(frame_name) + + # This is a corner case where the source frame is also a target frame + if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": + self._source_is_also_target_frame = True + + else: + # Store the first matching prim path and the type of frame + body_names_to_frames[body_name] = { + "frames": {frame_name}, + "prim_path": matching_prim_path, + "type": frame_type, + } + + if offset is not None: + offset_pos = torch.tensor(offset.pos, device=self.device) + offset_quat = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets (optimized code path in _update_buffer_impl) + if not is_identity_pose(offset_pos, offset_quat): + non_identity_offset_frames.append(frame_name) + self._apply_target_frame_offset = True + target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} + + if not self._apply_target_frame_offset: + logger.info( + f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" + f" are identity: {frames[1:]}" + ) + else: + logger.info( + f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" + f" {non_identity_offset_frames}" + ) + + # The names of bodies that RigidPrim will be tracking to later extract transforms from + tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] + tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] + + body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # Create a prim view for all frames and initialize it + # order of transforms coming out of view will be source frame followed by target frame(s) + self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) + + # Determine the order in which regex evaluated body names so we can later index into frame transforms + # by frame name correctly + all_prim_paths = self._frame_physx_view.prim_paths + + if "env_" in all_prim_paths[0]: + + def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: + """Separates the environment number and prim_path from the item. + + Args: + item: The item to extract the environment number from. Assumes item is of the form + `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. + Returns: + The environment number and the prim_path. + """ + match = re.search(r"env_(\d+)(.*)", item) + return (int(match.group(1)), match.group(2)) + + # Find the indices that would reorganize output to be per environment. 
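+            # For example, a plain lexicographic sort on the prim paths would place `env_11` before `env_2`,
+            # since the strings are compared character-by-character rather than numerically.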
+ # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base + # to come before env_1/Robot/foot so we need to use custom key function + self._per_env_indices = [ + index + for index, _ in sorted( + list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) + ) + ] + + # Only need 0th env as the names and their ordering are the same across environments + sorted_prim_paths = [ + all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] + ] + + else: + # If no environment is present, then the order of the body names is the same as the order of the + # prim paths sorted alphabetically + self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] + sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] + + # -- target frames: use relative prim path for unique identification + self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] + + # -- source frame: use relative prim path for unique identification + self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) + source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) + + # Only remove source frame from tracked bodies if it is not also a target frame + if not self._source_is_also_target_frame: + self._target_frame_body_names.remove(self._source_frame_body_name) + + # Determine indices into all tracked body frames for both source and target frames + all_ids = torch.arange(self._num_envs * len(tracked_body_names)) + self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index + + # If source frame is also a target frame, then the target frame body ids are the same as + # the source frame body ids + if self._source_is_also_target_frame: + self._target_frame_body_ids = all_ids + else: + self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] + + # The name of each of the target frame(s) - either user specified or defaulted to the body name + self._target_frame_names: list[str] = [] + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_quat = [] + # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed + # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations + # when updating sensor in _update_buffers_impl + duplicate_frame_indices = [] + + # Go through each body name and determine the number of duplicates we need for that frame + # and extract the offsets. This is all done to handle the case where multiple frames + # reference the same body, but have different names and/or offsets + for i, body_name in enumerate(self._target_frame_body_names): + for frame in body_names_to_frames[body_name]["frames"]: + # Only need to handle target frames here as source frame is handled separately + if frame in target_offsets: + target_frame_offset_pos.append(target_offsets[frame]["pos"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) + self._target_frame_names.append(frame) + duplicate_frame_indices.append(i) + + # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes + # [0, 1, 1, 2, 3, 4, 4, 5]. 
Again, this is a optimization to make _update_buffer_impl more efficient + duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) + if self._source_is_also_target_frame: + num_target_body_frames = len(tracked_body_names) + else: + num_target_body_frames = len(tracked_body_names) - 1 + + self._duplicate_frame_indices = torch.cat( + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] + ) + + # Target frame offsets are only applied if at least one of the offsets are non-identity + if self._apply_target_frame_offset: + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) + self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) + + # fill the data buffer + self._data.target_frame_names = self._target_frame_names + self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) + self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) + self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) + self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) + self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) + self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = ... + + # Extract transforms from view - shape is: + # (the total number of source and target body frames being tracked * self._num_envs, 7) + transforms = self._frame_physx_view.get_transforms() + + # Reorder the transforms to be per environment as is expected of SensorData + transforms = transforms[self._per_env_indices] + + # Convert quaternions as PhysX uses xyzw form + transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") + + # Process source frame transform + source_frames = transforms[self._source_frame_body_ids] + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_source_frame_offset: + source_pos_w, source_quat_w = combine_frame_transforms( + source_frames[:, :3], + source_frames[:, 3:], + self._source_frame_offset_pos, + self._source_frame_offset_quat, + ) + else: + source_pos_w = source_frames[:, :3] + source_quat_w = source_frames[:, 3:] + + # Process target frame transforms + target_frames = transforms[self._target_frame_body_ids] + duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] + duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] + + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_target_frame_offset: + target_pos_w, target_quat_w = combine_frame_transforms( + duplicated_target_frame_pos_w, + duplicated_target_frame_quat_w, + self._target_frame_offset_pos, + self._target_frame_offset_quat, + ) + else: + target_pos_w = duplicated_target_frame_pos_w + target_quat_w = duplicated_target_frame_quat_w + + # Compute the transform of the target frame with respect to the source frame + total_num_frames = len(self._target_frame_names) + target_pos_source, target_quat_source = subtract_frame_transforms( + source_pos_w.unsqueeze(1).expand(-1, total_num_frames, 
-1).reshape(-1, 3), + source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), + target_pos_w, + target_quat_w, + ) + + # Update buffers + # note: The frame names / ordering don't change so no need to update them after initialization + self._data.source_pos_w[:] = source_pos_w.view(-1, 3) + self._data.source_quat_w[:] = source_quat_w.view(-1, 4) + self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) + self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) + self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) + self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + if not hasattr(self, "frame_visualizer"): + self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + + # set their visibility to true + self.frame_visualizer.set_visibility(True) + else: + if hasattr(self, "frame_visualizer"): + self.frame_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # Get the all frames pose + frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) + frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) + + # Get the all connecting lines between frames pose + lines_pos, lines_quat, lines_length = self._get_connecting_lines( + start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), + end_pos=self._data.target_pos_w.view(-1, 3), + ) + + # Initialize default (identity) scales and marker indices for all markers (frames + lines) + marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) + marker_indices = torch.zeros(marker_scales.size(0)) + + # Set the z-scale of line markers to represent their actual length + marker_scales[-lines_length.size(0) :, -1] = lines_length + + # Assign marker config index 1 to line markers + marker_indices[-lines_length.size(0) :] = 1 + + # Update the frame and the connecting line visualizer + self.frame_visualizer.visualize( + translations=torch.cat((frames_pos, lines_pos), dim=0), + orientations=torch.cat((frames_quat, lines_quat), dim=0), + scales=marker_scales, + marker_indices=marker_indices, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._frame_physx_view = None + + """ + Internal helpers. + """ + + def _get_connecting_lines( + self, start_pos: torch.Tensor, end_pos: torch.Tensor + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Draws connecting lines between frames. + + Given start and end points, this function computes the positions (mid-point), orientations, + and lengths of the connecting lines. + + Args: + start_pos: The start positions of the connecting lines. Shape is (N, 3). + end_pos: The end positions of the connecting lines. Shape is (N, 3). + + Returns: + A tuple containing: + - The positions of each connecting line. Shape is (N, 3). + - The orientations of each connecting line in quaternion. Shape is (N, 4). + - The lengths of each connecting line. Shape is (N,). 
+ """ + direction = end_pos - start_pos + lengths = torch.norm(direction, dim=-1) + positions = (start_pos + end_pos) / 2 + + # Get default direction (along z-axis) + default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) + + # Normalize direction vector + direction_norm = normalize(direction) + + # Calculate rotation from default direction to target direction + rotation_axis = torch.linalg.cross(default_direction, direction_norm) + rotation_axis_norm = torch.norm(rotation_axis, dim=-1) + + # Handle case where vectors are parallel + mask = rotation_axis_norm > 1e-6 + rotation_axis = torch.where( + mask.unsqueeze(-1), + normalize(rotation_axis), + torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), + ) + + # Calculate rotation angle + cos_angle = torch.sum(default_direction * direction_norm, dim=-1) + cos_angle = torch.clamp(cos_angle, -1.0, 1.0) + angle = torch.acos(cos_angle) + orientations = quat_from_angle_axis(angle, rotation_axis) + + return positions, orientations, lengths + + @staticmethod + def _get_relative_body_path(prim_path: str) -> str: + """Extract a normalized body path from a prim path. + + Removes the environment instance segment `/envs/env_/` to normalize paths + across multiple environments, while preserving the `/envs/` prefix to + distinguish environment-scoped paths from non-environment paths. + + Examples: + - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' + - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' + - '/World/Robot' -> '/World/Robot' + - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' + + Args: + prim_path: The full prim path. + + Returns: + The prim path with `/envs/env_/` removed, preserving `/envs/`. + """ + pattern = re.compile(r"/envs/env_[^/]+/") + return pattern.sub("/envs/", prim_path) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py new file mode 100644 index 00000000000..ca9b528aa1d --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .frame_transformer import FrameTransformer + + +@configclass +class OffsetCfg: + """The offset pose of one frame relative to another frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + +@configclass +class FrameTransformerCfg(SensorBaseCfg): + """Configuration for the frame transformer sensor.""" + + @configclass + class FrameCfg: + """Information specific to a coordinate frame.""" + + prim_path: str = MISSING + """The prim path corresponding to a rigid body. + + This can be a regex pattern to match multiple prims. For example, "/Robot/.*" + will match all prims under "/Robot". 
+ + This means that if the source :attr:`FrameTransformerCfg.prim_path` is "/Robot/base", + and the target :attr:`FrameTransformerCfg.FrameCfg.prim_path` is "/Robot/.*", then + the frame transformer will track the poses of all the prims under "/Robot", + including "/Robot/base" (even though this will result in an identity pose w.r.t. + the source frame). + """ + + name: str | None = None + """User-defined name for the new coordinate frame. Defaults to None. + + If None, then the name is extracted from the leaf of the prim path. + """ + + offset: OffsetCfg = OffsetCfg() + """The pose offset from the parent prim frame.""" + + class_type: type = FrameTransformer + + prim_path: str = MISSING + """The prim path of the body to transform from (source frame).""" + + source_frame_offset: OffsetCfg = OffsetCfg() + """The pose offset from the source prim frame.""" + + target_frames: list[FrameCfg] = MISSING + """A list of the target frames. + + This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped, + we can use a single FrameTransformer to track each foot's position and orientation in the body + frame using four frame offsets. + """ + + visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") + """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. + + Note: + This attribute is only used when debug visualization is enabled. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 00000000000..942ddbd5144 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,57 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import dataclass + +import torch + + +@dataclass +class FrameTransformerData: + """Data container for the frame transformer sensor.""" + + target_frame_names: list[str] = None + """Target frame names (this denotes the order in which that frame data is ordered). + + The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. + This does not necessarily follow the order in which the frames are defined in the config due to + the regex matching. + """ + + target_pos_source: torch.Tensor = None + """Position of the target frame(s) relative to source frame. + + Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. + """ + + target_quat_source: torch.Tensor = None + """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). + + Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. + """ + + target_pos_w: torch.Tensor = None + """Position of the target frame(s) after offset (in world frame). + + Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. + """ + + target_quat_w: torch.Tensor = None + """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). + + Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. + """ + + source_pos_w: torch.Tensor = None + """Position of the source frame after offset (in world frame). 
+
+    Shape is (N, 3), where N is the number of environments.
+    """
+
+    source_quat_w: torch.Tensor = None
+    """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z).
+
+    Shape is (N, 4), where N is the number of environments.
+    """
diff --git a/source/isaaclab/isaaclab/assets/utils/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py
similarity index 65%
rename from source/isaaclab/isaaclab/assets/utils/__init__.py
rename to source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py
index 460a3056908..7501e41cf49 100644
--- a/source/isaaclab/isaaclab/assets/utils/__init__.py
+++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py
@@ -2,3 +2,11 @@
 # All rights reserved.
 #
 # SPDX-License-Identifier: BSD-3-Clause
+
+"""
+Imu Sensor
+"""
+
+from .imu import Imu
+from .imu_cfg import ImuCfg
+from .imu_data import ImuData
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py
new file mode 100644
index 00000000000..e092b39502e
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py
@@ -0,0 +1,299 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+from collections.abc import Sequence
+from typing import TYPE_CHECKING
+
+import torch
+
+from isaacsim.core.simulation_manager import SimulationManager
+from pxr import UsdGeom, UsdPhysics
+
+import isaaclab.sim as sim_utils
+import isaaclab.utils.math as math_utils
+from isaaclab.markers import VisualizationMarkers
+
+from ..sensor_base import SensorBase
+from .imu_data import ImuData
+
+if TYPE_CHECKING:
+    from .imu_cfg import ImuCfg
+
+
+class Imu(SensorBase):
+    """The Inertial Measurement Unit (IMU) sensor.
+
+    The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame
+    linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular
+    accelerations/velocities.
+
+    If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries.
+    The fixed transform from that ancestor to the target prim is computed once during initialization and
+    composed with the configured sensor offset.
+
+    .. note::
+
+        The accelerations are computed by numerical differentiation of the velocities. Consequently, the
+        IMU sensor accuracy depends on the chosen physics time step. For sufficient accuracy, we recommend
+        keeping the physics update rate at 200 Hz or higher.
+
+    .. note::
+
+        The user can configure the sensor offset in the configuration file. The offset is applied in the body
+        frame of the rigid source prim. If the target prim is not a rigid body, the offset is composed with the
+        fixed transform from the rigid ancestor to the target prim. The offset is defined as a position vector
+        and a quaternion rotation (w, x, y, z); the translation is applied first, followed by the rotation.
+
+    """
+
+    cfg: ImuCfg
+    """The configuration parameters."""
+
+    def __init__(self, cfg: ImuCfg):
+        """Initializes the Imu sensor.
+
+        Args:
+            cfg: The configuration parameters.
+ """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data = ImuData() + + # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) + self._rigid_parent_expr: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Imu sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._view.count}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> ImuData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_instances(self) -> int: + return self._view.count + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timestamps + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.pos_w[env_ids] = 0.0 + self._data.quat_w[env_ids] = 0.0 + self._data.quat_w[env_ids, 0] = 1.0 + self._data.projected_gravity_b[env_ids] = 0.0 + self._data.projected_gravity_b[env_ids, 2] = -1.0 + self._data.lin_vel_b[env_ids] = 0.0 + self._data.ang_vel_b[env_ids] = 0.0 + self._data.lin_acc_b[env_ids] = 0.0 + self._data.ang_acc_b[env_ids] = 0.0 + self._prev_lin_vel_w[env_ids] = 0.0 + self._prev_ang_vel_w[env_ids] = 0.0 + + def update(self, dt: float, force_recompute: bool = False): + # save timestamp + self._dt = dt + # execute updating + super().update(dt, force_recompute) + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + - If the target prim path is a rigid body, build the view directly on it. + - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor + to the target prim, and build the view on the ancestor expression. 
+ """ + # Initialize parent class + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check if the prim at path is a rigid prim + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") + + # Find the first matching ancestor prim that implements rigid body API + ancestor_prim = sim_utils.get_first_matching_ancestor_prim( + prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) + ) + if ancestor_prim is None: + raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") + # Convert ancestor prim path to expression + if ancestor_prim == prim: + self._rigid_parent_expr = self.cfg.prim_path + fixed_pos_b, fixed_quat_b = None, None + else: + # Convert ancestor prim path to expression + relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString + self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") + # Resolve the relative pose between the target prim and the ancestor prim + fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) + + # Create the rigid body view on the ancestor + self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) + + # Get world gravity + gravity = self._physics_sim_view.get_gravity() + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) + + # Create internal buffers + self._initialize_buffers_impl() + + # Compose the configured offset with the fixed ancestor->target transform (done once) + # new_offset = fixed * cfg.offset + # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg + if fixed_pos_b is not None and fixed_quat_b is not None: + # Broadcast fixed transform across instances + fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) + fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) + + cfg_p = self._offset_pos_b.clone() + cfg_q = self._offset_quat_b.clone() + + composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) + composed_q = math_utils.quat_mul(fixed_q, cfg_q) + + self._offset_pos_b = composed_p + self._offset_quat_b = composed_q + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + # world pose of the rigid source (ancestor) from the PhysX view + pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) + quat_w = quat_w.roll(1, dims=-1) + + # sensor pose in world: apply composed offset + self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) + self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) + + # COM of rigid source (body frame) + com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] + + # Velocities at rigid source COM + lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) + + # If sensor offset or COM != link origin, account for angular velocity contribution + lin_vel_w += torch.linalg.cross( + ang_vel_w, math_utils.quat_apply(quat_w, 
self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 + ) + + # numerical derivative (world frame) + lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] + ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt + + # batch rotate world->body using current sensor orientation + dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) + dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( + 5, dim=0 + ) + # store the velocities. + self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] + self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] + # store the accelerations + self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] + self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] + # store projected gravity + self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] + + self._prev_lin_vel_w[env_ids] = lin_vel_w + self._prev_ang_vel_w[env_ids] = ang_vel_w + + def _initialize_buffers_impl(self): + """Create buffers for storing data.""" + # data buffers + self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) + self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) + self._data.quat_w[:, 0] = 1.0 + self._data.projected_gravity_b = torch.zeros(self._view.count, 3, device=self._device) + self._data.lin_vel_b = torch.zeros_like(self._data.pos_w) + self._data.ang_vel_b = torch.zeros_like(self._data.pos_w) + self._data.lin_acc_b = torch.zeros_like(self._data.pos_w) + self._data.ang_acc_b = torch.zeros_like(self._data.pos_w) + self._prev_lin_vel_w = torch.zeros_like(self._data.pos_w) + self._prev_ang_vel_w = torch.zeros_like(self._data.pos_w) + + # store sensor offset (applied relative to rigid source). + # This may be composed later with a fixed ancestor->target transform. + self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) + # set gravity bias + self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( + self._view.count, 1 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. 
not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.acceleration_visualizer.set_visibility(True) + else: + if hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self._view is None: + return + # get marker location + # -- base state + base_pos_w = self._data.pos_w.clone() + base_pos_w[:, 2] += 0.5 + # -- resolve the scales + default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale + arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) + # get up axis of current stage + up_axis = UsdGeom.GetStageUpAxis(self.stage) + # arrow-direction + quat_opengl = math_utils.quat_from_matrix( + math_utils.create_rotation_matrix_from_view( + self._data.pos_w, + self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), + up_axis=up_axis, + device=self._device, + ) + ) + quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") + # display markers + self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py new file mode 100644 index 00000000000..06aeca5fa95 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py @@ -0,0 +1,46 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from isaaclab.markers import VisualizationMarkersCfg +from isaaclab.markers.config import RED_ARROW_X_MARKER_CFG +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .imu import Imu + + +@configclass +class ImuCfg(SensorBaseCfg): + """Configuration for an Inertial Measurement Unit (IMU) sensor.""" + + class_type: type = Imu + + @configclass + class OffsetCfg: + """The offset pose of the sensor's frame from the sensor's parent frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + offset: OffsetCfg = OffsetCfg() + """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" + + visualizer_cfg: VisualizationMarkersCfg = RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Command/velocity_goal") + """The configuration object for the visualization markers. Defaults to RED_ARROW_X_MARKER_CFG. + + This attribute is only used when debug visualization is enabled. + """ + gravity_bias: tuple[float, float, float] = (0.0, 0.0, 9.81) + """The linear acceleration bias applied to the linear acceleration in the world frame (x,y,z). + + Imu sensors typically output a positive gravity acceleration in opposition to the direction of gravity. This + config parameter allows users to subtract that bias if set to (0.,0.,0.). 
By default, this is set to (0.0, 0.0, 9.81),
+    which results in a positive acceleration reading along the world Z axis for a stationary sensor.
+    """
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py
new file mode 100644
index 00000000000..dd06e09a8b7
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+
+import torch
+
+
+@dataclass
+class ImuData:
+    """Data container for the Imu sensor."""
+
+    pos_w: torch.Tensor = None
+    """Position of the sensor origin in world frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
+
+    quat_w: torch.Tensor = None
+    """Orientation of the sensor origin in quaternion ``(w, x, y, z)`` in world frame.
+
+    Shape is (N, 4), where ``N`` is the number of environments.
+    """
+
+    projected_gravity_b: torch.Tensor = None
+    """Gravity direction unit vector projected on the IMU frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
+
+    lin_vel_b: torch.Tensor = None
+    """IMU frame linear velocity relative to the world expressed in IMU frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
+
+    ang_vel_b: torch.Tensor = None
+    """IMU frame angular velocity relative to the world expressed in IMU frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
+
+    lin_acc_b: torch.Tensor = None
+    """IMU frame linear acceleration relative to the world expressed in IMU frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
+
+    ang_acc_b: torch.Tensor = None
+    """IMU frame angular acceleration relative to the world expressed in IMU frame.
+
+    Shape is (N, 3), where ``N`` is the number of environments.
+    """
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py
new file mode 100644
index 00000000000..06f479ed2ee
--- /dev/null
+++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Sub-module for Warp-based ray-cast sensor.
+
+The sub-module contains two implementations of the ray-cast sensor:
+
+- :class:`isaaclab.sensors.ray_caster.RayCaster`: A basic ray-cast sensor that can be used to ray-cast against a single mesh.
+- :class:`isaaclab.sensors.ray_caster.MultiMeshRayCaster`: A multi-mesh ray-cast sensor that can be used to ray-cast against
+  multiple meshes. For these meshes, it tracks their transformations and updates the warp meshes accordingly.
+
+Corresponding camera implementations are also provided for each of the sensor implementations. Internally, they perform
+the same ray-casting operations as the sensor implementations, but return the results as images.
+"""
+
+from .
import patterns +from .multi_mesh_ray_caster import MultiMeshRayCaster +from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera +from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg +from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData +from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg +from .multi_mesh_ray_caster_data import MultiMeshRayCasterData +from .ray_caster import RayCaster +from .ray_caster_camera import RayCasterCamera +from .ray_caster_camera_cfg import RayCasterCameraCfg +from .ray_caster_cfg import RayCasterCfg +from .ray_caster_data import RayCasterData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py new file mode 100644 index 00000000000..39be0d7ca0d --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py @@ -0,0 +1,427 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING, ClassVar + +import numpy as np +import torch +import trimesh +import warp as wp + +import omni.physics.tensors.impl.api as physx + +import isaaclab.sim as sim_utils +from isaaclab.sim.views import XformPrimView +from isaaclab.utils.math import matrix_from_quat, quat_mul +from isaaclab.utils.mesh import PRIMITIVE_MESH_TYPES, create_trimesh_from_geom_mesh, create_trimesh_from_geom_shape +from isaaclab.utils.warp import convert_to_warp_mesh, raycast_dynamic_meshes + +from .multi_mesh_ray_caster_data import MultiMeshRayCasterData +from .ray_cast_utils import obtain_world_pose_from_view +from .ray_caster import RayCaster + +if TYPE_CHECKING: + from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg + +# import logger +logger = logging.getLogger(__name__) + + +class MultiMeshRayCaster(RayCaster): + """A multi-mesh ray-casting sensor. + + The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are + defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against + a set of meshes with a given ray pattern. + + The meshes are parsed from the list of primitive paths provided in the configuration. These are then + converted to warp meshes and stored in the :attr:`meshes` list. The ray-caster then ray-casts against + these warp meshes using the ray pattern provided in the configuration. + + Compared to the default RayCaster, the MultiMeshRayCaster provides additional functionality and flexibility as + an extension of the default RayCaster with the following enhancements: + + - Raycasting against multiple target types : Supports primitive shapes (spheres, cubes, etc.) as well as arbitrary + meshes. + - Dynamic mesh tracking : Keeps track of specified meshes, enabling raycasting against moving parts + (e.g., robot links, articulated bodies, or dynamic obstacles). + - Memory-efficient caching : Avoids redundant memory usage by reusing mesh data across environments. + + Example usage to raycast against the visual meshes of a robot (e.g. ANYmal): + + .. 
code-block:: python + + ray_caster_cfg = MultiMeshRayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot", + mesh_prim_paths=[ + "/World/Ground", + MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/LF_.*/visuals"), + MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/RF_.*/visuals"), + MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/LH_.*/visuals"), + MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/RH_.*/visuals"), + MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/base/visuals"), + ], + ray_alignment="world", + pattern_cfg=patterns.GridPatternCfg(resolution=0.02, size=(2.5, 2.5), direction=(0, 0, -1)), + ) + + """ + + cfg: MultiMeshRayCasterCfg + """The configuration parameters.""" + + mesh_offsets: dict[str, tuple[torch.Tensor, torch.Tensor]] = {} + + mesh_views: ClassVar[dict[str, XformPrimView | physx.ArticulationView | physx.RigidBodyView]] = {} + """A dictionary to store mesh views for raycasting, shared across all instances. + + The keys correspond to the prim path for the mesh views, and values are the corresponding view objects. + """ + + def __init__(self, cfg: MultiMeshRayCasterCfg): + """Initializes the ray-caster object. + + Args: + cfg: The configuration parameters. + """ + # Initialize base class + super().__init__(cfg) + + # Create empty variables for storing output data + self._num_meshes_per_env: dict[str, int] = {} + """Keeps track of the number of meshes per env for each ray_cast target. + Since we allow regex indexing (e.g. env_*/object_*) they can differ + """ + + self._raycast_targets_cfg: list[MultiMeshRayCasterCfg.RaycastTargetCfg] = [] + for target in self.cfg.mesh_prim_paths: + # Legacy support for string targets. Treat them as global targets. + if isinstance(target, str): + self._raycast_targets_cfg.append(cfg.RaycastTargetCfg(prim_expr=target, track_mesh_transforms=False)) + else: + self._raycast_targets_cfg.append(target) + + # Resolve regex namespace if set + for cfg in self._raycast_targets_cfg: + cfg.prim_expr = cfg.prim_expr.format(ENV_REGEX_NS="/World/envs/env_.*") + + # overwrite the data class + self._data = MultiMeshRayCasterData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + + return ( + f"Ray-caster @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of meshes : {self._num_envs} x {sum(self._num_meshes_per_env.values())} \n" + f"\tnumber of sensors : {self._view.count}\n" + f"\tnumber of rays/sensor: {self.num_rays}\n" + f"\ttotal number of rays : {self.num_rays * self._view.count}" + ) + + """ + Properties + """ + + @property + def data(self) -> MultiMeshRayCasterData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + """ + Implementation. + """ + + def _initialize_warp_meshes(self): + """Parse mesh prim expressions, build (or reuse) Warp meshes, and cache per-env mesh IDs. + + High-level steps (per target expression): + + 1. Resolve matching prims by regex/path expression. + 2. Collect supported mesh child prims; merge into a single mesh if configured. + 3. Deduplicate identical vertex buffers (exact match) to avoid uploading duplicates to Warp. + 4. Partition mesh IDs per environment or mark as globally shared. + 5. Optionally create physics views (articulation / rigid body / fallback XForm) and cache local offsets. 
+ + Exceptions: + Raises a RuntimeError if: + + - No prims match the provided expression. + - No supported mesh prims are found under a matched prim. + - Multiple mesh prims are found but merging is disabled. + + """ + multi_mesh_ids: dict[str, list[list[int]]] = {} + for target_cfg in self._raycast_targets_cfg: + # target prim path to ray cast against + target_prim_path = target_cfg.prim_expr + # # check if mesh already casted into warp mesh and skip if so. + if target_prim_path in multi_mesh_ids: + logger.warning( + f"Mesh at target prim path '{target_prim_path}' already exists in the mesh cache. Duplicate entries" + " in `mesh_prim_paths`? This mesh will be skipped." + ) + continue + + # find all matching prim paths to provided expression of the target + target_prims = sim_utils.find_matching_prims(target_prim_path) + if len(target_prims) == 0: + raise RuntimeError(f"Failed to find a prim at path expression: {target_prim_path}") + + # If only one prim is found, treat it as a global prim. + # Either it's a single global object (e.g. ground) or we are only using one env. + is_global_prim = len(target_prims) == 1 + + loaded_vertices: list[np.ndarray | None] = [] + wp_mesh_ids = [] + + for target_prim in target_prims: + # Reuse previously parsed shared mesh instance if possible. + if target_cfg.is_shared and len(wp_mesh_ids) > 0: + # Verify if this mesh has already been registered in an earlier environment. + # Note, this check may fail, if the prim path is not following the env_.* pattern + # Which (worst case) leads to parsing the mesh and skipping registering it at a later stage + curr_prim_base_path = re.sub(r"env_\d+", "env_0", str(target_prim.GetPath())) # + if curr_prim_base_path in MultiMeshRayCaster.meshes: + MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = MultiMeshRayCaster.meshes[ + curr_prim_base_path + ] + # Reuse mesh imported by another ray-cast sensor (global cache). + if str(target_prim.GetPath()) in MultiMeshRayCaster.meshes: + wp_mesh_ids.append(MultiMeshRayCaster.meshes[str(target_prim.GetPath())].id) + loaded_vertices.append(None) + continue + + mesh_prims = sim_utils.get_all_matching_child_prims( + target_prim.GetPath(), lambda prim: prim.GetTypeName() in PRIMITIVE_MESH_TYPES + ["Mesh"] + ) + if len(mesh_prims) == 0: + warn_msg = ( + f"No mesh prims found at path: {target_prim.GetPath()} with supported types:" + f" {PRIMITIVE_MESH_TYPES + ['Mesh']}" + " Skipping this target." 
+ ) + for prim in sim_utils.get_all_matching_child_prims(target_prim.GetPath(), lambda prim: True): + warn_msg += f"\n - Available prim '{prim.GetPath()}' of type '{prim.GetTypeName()}'" + logger.warning(warn_msg) + continue + + trimesh_meshes = [] + + for mesh_prim in mesh_prims: + # check if valid + if mesh_prim is None or not mesh_prim.IsValid(): + raise RuntimeError(f"Invalid mesh prim path: {target_prim}") + + if mesh_prim.GetTypeName() == "Mesh": + mesh = create_trimesh_from_geom_mesh(mesh_prim) + else: + mesh = create_trimesh_from_geom_shape(mesh_prim) + scale = sim_utils.resolve_prim_scale(mesh_prim) + mesh.apply_scale(scale) + + relative_pos, relative_quat = sim_utils.resolve_prim_pose(mesh_prim, target_prim) + relative_pos = torch.tensor(relative_pos, dtype=torch.float32) + relative_quat = torch.tensor(relative_quat, dtype=torch.float32) + + rotation = matrix_from_quat(relative_quat) + transform = np.eye(4) + transform[:3, :3] = rotation.numpy() + transform[:3, 3] = relative_pos.numpy() + mesh.apply_transform(transform) + + # add to list of parsed meshes + trimesh_meshes.append(mesh) + + if len(trimesh_meshes) == 1: + trimesh_mesh = trimesh_meshes[0] + elif target_cfg.merge_prim_meshes: + # combine all trimesh meshes into a single mesh + trimesh_mesh = trimesh.util.concatenate(trimesh_meshes) + else: + raise RuntimeError( + f"Multiple mesh prims found at path: {target_prim.GetPath()} but merging is disabled. Please" + " enable `merge_prim_meshes` in the configuration or specify each mesh separately." + ) + + # check if the mesh is already registered, if so only reference the mesh + registered_idx = _registered_points_idx(trimesh_mesh.vertices, loaded_vertices) + if registered_idx != -1 and self.cfg.reference_meshes: + logger.info("Found a duplicate mesh, only reference the mesh.") + # Found a duplicate mesh, only reference the mesh. + loaded_vertices.append(None) + wp_mesh_ids.append(wp_mesh_ids[registered_idx]) + else: + loaded_vertices.append(trimesh_mesh.vertices) + wp_mesh = convert_to_warp_mesh(trimesh_mesh.vertices, trimesh_mesh.faces, device=self.device) + MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = wp_mesh + wp_mesh_ids.append(wp_mesh.id) + + # print info + if registered_idx != -1: + logger.info(f"Found duplicate mesh for mesh prims under path '{target_prim.GetPath()}'.") + else: + logger.info( + f"Read '{len(mesh_prims)}' mesh prims under path '{target_prim.GetPath()}' with" + f" {len(trimesh_mesh.vertices)} vertices and {len(trimesh_mesh.faces)} faces." + ) + + if is_global_prim: + # reference the mesh for each environment to ray cast against + multi_mesh_ids[target_prim_path] = [wp_mesh_ids] * self._num_envs + self._num_meshes_per_env[target_prim_path] = len(wp_mesh_ids) + else: + # split up the meshes for each environment. Little bit ugly, since + # the current order is interleaved (env1_obj1, env1_obj2, env2_obj1, env2_obj2, ...) 
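+                # For illustration (hypothetical values): with 2 envs and
+                # wp_mesh_ids = [env1_obj1, env1_obj2, env2_obj1, env2_obj2], the slicing below yields
+                # [[env1_obj1, env1_obj2], [env2_obj1, env2_obj2]], i.e. 2 meshes per environment.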
+ multi_mesh_ids[target_prim_path] = [] + mesh_idx = 0 + n_meshes_per_env = len(wp_mesh_ids) // self._num_envs + self._num_meshes_per_env[target_prim_path] = n_meshes_per_env + for _ in range(self._num_envs): + multi_mesh_ids[target_prim_path].append(wp_mesh_ids[mesh_idx : mesh_idx + n_meshes_per_env]) + mesh_idx += n_meshes_per_env + + if target_cfg.track_mesh_transforms: + MultiMeshRayCaster.mesh_views[target_prim_path], MultiMeshRayCaster.mesh_offsets[target_prim_path] = ( + self._obtain_trackable_prim_view(target_prim_path) + ) + + # throw an error if no meshes are found + if all([target_cfg.prim_expr not in multi_mesh_ids for target_cfg in self._raycast_targets_cfg]): + raise RuntimeError( + f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" + ) + + total_n_meshes_per_env = sum(self._num_meshes_per_env.values()) + self._mesh_positions_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 3, device=self.device) + self._mesh_orientations_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 4, device=self.device) + + # Update the mesh positions and rotations + mesh_idx = 0 + for target_cfg in self._raycast_targets_cfg: + n_meshes = self._num_meshes_per_env[target_cfg.prim_expr] + + # update position of the target meshes + pos_w, ori_w = [], [] + for prim in sim_utils.find_matching_prims(target_cfg.prim_expr): + translation, quat = sim_utils.resolve_prim_pose(prim) + pos_w.append(translation) + ori_w.append(quat) + pos_w = torch.tensor(pos_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 3) + ori_w = torch.tensor(ori_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 4) + + self._mesh_positions_w[:, mesh_idx : mesh_idx + n_meshes] = pos_w + self._mesh_orientations_w[:, mesh_idx : mesh_idx + n_meshes] = ori_w + mesh_idx += n_meshes + + # flatten the list of meshes that are included in mesh_prim_paths of the specific ray caster + multi_mesh_ids_flattened = [] + for env_idx in range(self._num_envs): + meshes_in_env = [] + for target_cfg in self._raycast_targets_cfg: + meshes_in_env.extend(multi_mesh_ids[target_cfg.prim_expr][env_idx]) + multi_mesh_ids_flattened.append(meshes_in_env) + + self._mesh_views = [ + self.mesh_views[target_cfg.prim_expr] if target_cfg.track_mesh_transforms else None + for target_cfg in self._raycast_targets_cfg + ] + + # save a warp array with mesh ids that is passed to the raycast function + self._mesh_ids_wp = wp.array2d(multi_mesh_ids_flattened, dtype=wp.uint64, device=self.device) + + def _initialize_rays_impl(self): + super()._initialize_rays_impl() + if self.cfg.update_mesh_ids: + self._data.ray_mesh_ids = torch.zeros( + self._num_envs, self.num_rays, 1, device=self.device, dtype=torch.int16 + ) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data. + + Args: + env_ids: The environment ids to update. 
+ """ + + self._update_ray_infos(env_ids) + + # Update the mesh positions and rotations + mesh_idx = 0 + for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): + if not target_cfg.track_mesh_transforms: + mesh_idx += self._num_meshes_per_env[target_cfg.prim_expr] + continue + + # update position of the target meshes + pos_w, ori_w = obtain_world_pose_from_view(view, None) + pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w + ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w + + if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: + pos_offset, ori_offset = MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] + pos_w -= pos_offset + ori_w = quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) + + count = view.count + if count != 1: # Mesh is not global, i.e. we have different meshes for each env + count = count // self._num_envs + pos_w = pos_w.view(self._num_envs, count, 3) + ori_w = ori_w.view(self._num_envs, count, 4) + + self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w + self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w + mesh_idx += count + + self._data.ray_hits_w[env_ids], _, _, _, mesh_ids = raycast_dynamic_meshes( + self._ray_starts_w[env_ids], + self._ray_directions_w[env_ids], + mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env + max_dist=self.cfg.max_distance, + mesh_positions_w=self._mesh_positions_w[env_ids], + mesh_orientations_w=self._mesh_orientations_w[env_ids], + return_mesh_id=self.cfg.update_mesh_ids, + ) + + if self.cfg.update_mesh_ids: + self._data.ray_mesh_ids[env_ids] = mesh_ids + + def __del__(self): + super().__del__() + if RayCaster._instance_count == 0: + MultiMeshRayCaster.mesh_offsets.clear() + MultiMeshRayCaster.mesh_views.clear() + + +""" +Helper functions +""" + + +def _registered_points_idx(points: np.ndarray, registered_points: list[np.ndarray | None]) -> int: + """Check if the points are already registered in the list of registered points. + + Args: + points: The points to check. + registered_points: The list of registered points. + + Returns: + The index of the registered points if found, otherwise -1. + """ + for idx, reg_points in enumerate(registered_points): + if reg_points is None: + continue + if reg_points.shape == points.shape and (reg_points == points).all(): + return idx + return -1 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py new file mode 100644 index 00000000000..970860fa50a --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py @@ -0,0 +1,221 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import isaaclab.utils.math as math_utils +from isaaclab.utils.warp import raycast_dynamic_meshes + +from .multi_mesh_ray_caster import MultiMeshRayCaster +from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData +from .ray_cast_utils import obtain_world_pose_from_view +from .ray_caster_camera import RayCasterCamera + +if TYPE_CHECKING: + from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg + + +class MultiMeshRayCasterCamera(RayCasterCamera, MultiMeshRayCaster): + """A multi-mesh ray-casting camera sensor. + + The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are + defined in the sensor's local coordinate frame. The sensor has the same interface as the + :class:`isaaclab.sensors.Camera` that implements the camera class through USD camera prims. + However, this class provides a faster image generation. The sensor converts meshes from the list of + primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these + Warp meshes only. + + Currently, only the following annotators are supported: + + - ``"distance_to_camera"``: An image containing the distance to camera optical center. + - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. + - ``"normals"``: An image containing the local surface normal vectors at each pixel. + """ + + cfg: MultiMeshRayCasterCameraCfg + """The configuration parameters.""" + + def __init__(self, cfg: MultiMeshRayCasterCameraCfg): + """Initializes the camera object. + + Args: + cfg: The configuration parameters. + + Raises: + ValueError: If the provided data types are not supported by the ray-caster camera. + """ + self._check_supported_data_types(cfg) + # initialize base class + MultiMeshRayCaster.__init__(self, cfg) + # create empty variables for storing output data + self._data = MultiMeshRayCasterCameraData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Multi-Mesh Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of meshes : {len(MultiMeshRayCaster.meshes)}\n" + f"\tnumber of sensors : {self._view.count}\n" + f"\tnumber of rays/sensor: {self.num_rays}\n" + f"\ttotal number of rays : {self.num_rays * self._view.count}\n" + f"\timage shape : {self.image_shape}" + ) + + """ + Implementation. 
+ """ + + def _initialize_warp_meshes(self): + MultiMeshRayCaster._initialize_warp_meshes(self) + + def _create_buffers(self): + super()._create_buffers() + self._data.image_mesh_ids = torch.zeros( + self._num_envs, *self.image_shape, 1, device=self.device, dtype=torch.int16 + ) + + def _initialize_rays_impl(self): + # Create all indices buffer + self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) + # Create frame count buffer + self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) + # create buffers + self._create_buffers() + # compute intrinsic matrices + self._compute_intrinsic_matrices() + # compute ray stars and directions + self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( + self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device + ) + self.num_rays = self.ray_directions.shape[1] + # create buffer to store ray hits + self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) + # set offsets + quat_w = math_utils.convert_camera_frame_orientation_convention( + torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" + ) + self._offset_quat = quat_w.repeat(self._view.count, 1) + self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + + self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) + self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) + + self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + + def _update_ray_infos(self, env_ids: Sequence[int]): + """Updates the ray information buffers.""" + + # compute poses from current view + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) + pos_w, quat_w = math_utils.combine_frame_transforms( + pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + ) + # update the data + self._data.pos_w[env_ids] = pos_w + self._data.quat_w_world[env_ids] = quat_w + self._data.quat_w_ros[env_ids] = quat_w + + # note: full orientation is considered + ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) + ray_starts_w += pos_w.unsqueeze(1) + ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) + + self._ray_starts_w[env_ids] = ray_starts_w + self._ray_directions_w[env_ids] = ray_directions_w + + def _update_buffers_impl(self, env_ids: Sequence[int] | torch.Tensor | None): + """Fills the buffers of the sensor data.""" + self._update_ray_infos(env_ids) + + # increment frame count + if env_ids is None: + env_ids = torch.arange(self._num_envs, device=self.device) + elif not isinstance(env_ids, torch.Tensor): + env_ids = torch.tensor(env_ids, device=self.device) + + self._frame[env_ids] += 1 + + # Update the mesh positions and rotations + mesh_idx = 0 + for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): + if not target_cfg.track_mesh_transforms: + mesh_idx += self._num_meshes_per_env[target_cfg.prim_expr] + continue + + # update position of the target meshes + pos_w, ori_w = obtain_world_pose_from_view(view, None) + pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w + ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w + + if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: + pos_offset, ori_offset = 
MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] + pos_w -= pos_offset + ori_w = math_utils.quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) + + count = view.count + if count != 1: # Mesh is not global, i.e. we have different meshes for each env + count = count // self._num_envs + pos_w = pos_w.view(self._num_envs, count, 3) + ori_w = ori_w.view(self._num_envs, count, 4) + + self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w + self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w + mesh_idx += count + + # ray cast and store the hits + self.ray_hits_w[env_ids], ray_depth, ray_normal, _, ray_mesh_ids = raycast_dynamic_meshes( + self._ray_starts_w[env_ids], + self._ray_directions_w[env_ids], + mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env + max_dist=self.cfg.max_distance, + mesh_positions_w=self._mesh_positions_w[env_ids], + mesh_orientations_w=self._mesh_orientations_w[env_ids], + return_distance=any( + [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] + ), + return_normal="normals" in self.cfg.data_types, + return_mesh_id=self.cfg.update_mesh_ids, + ) + + # update output buffers + if "distance_to_image_plane" in self.cfg.data_types: + # note: data is in camera frame so we only take the first component (z-axis of camera frame) + distance_to_image_plane = ( + math_utils.quat_apply( + math_utils.quat_inv(self._data.quat_w_world[env_ids]).repeat(1, self.num_rays), + (ray_depth[:, :, None] * self._ray_directions_w[env_ids]), + ) + )[:, :, 0] + # apply the maximum distance after the transformation + if self.cfg.depth_clipping_behavior == "max": + distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) + distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance + elif self.cfg.depth_clipping_behavior == "zero": + distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 + distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 + self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( + -1, *self.image_shape, 1 + ) + + if "distance_to_camera" in self.cfg.data_types: + if self.cfg.depth_clipping_behavior == "max": + ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) + elif self.cfg.depth_clipping_behavior == "zero": + ray_depth[ray_depth > self.cfg.max_distance] = 0.0 + self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) + + if "normals" in self.cfg.data_types: + self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) + + if self.cfg.update_mesh_ids: + self._data.image_mesh_ids[env_ids] = ray_mesh_ids.view(-1, *self.image_shape, 1) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py new file mode 100644 index 00000000000..45df51ce6d8 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the ray-cast camera sensor.""" + +import logging + +from isaaclab.utils import configclass + +from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera +from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg +from .ray_caster_camera_cfg import RayCasterCameraCfg + +# import logger +logger = logging.getLogger(__name__) + + +@configclass +class MultiMeshRayCasterCameraCfg(RayCasterCameraCfg, MultiMeshRayCasterCfg): + """Configuration for the multi-mesh ray-cast camera sensor.""" + + class_type: type = MultiMeshRayCasterCamera + + def __post_init__(self): + super().__post_init__() + + # Camera only supports 'base' ray alignment. Ensure this is set correctly. + if self.ray_alignment != "base": + logger.warning( + "Ray alignment for MultiMeshRayCasterCameraCfg only supports 'base' alignment. Overriding from" + f"'{self.ray_alignment}' to 'base'." + ) + self.ray_alignment = "base" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py new file mode 100644 index 00000000000..d2f26abdbf4 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py @@ -0,0 +1,23 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Data container for the multi-mesh ray-cast camera sensor.""" + +import torch + +from isaaclab.sensors.camera import CameraData + +from .ray_caster_data import RayCasterData + + +class MultiMeshRayCasterCameraData(CameraData, RayCasterData): + """Data container for the multi-mesh ray-cast sensor.""" + + image_mesh_ids: torch.Tensor = None + """The mesh ids of the image pixels. + + Shape is (N, H, W, 1), where N is the number of sensors, H and W are the height and width of the image, + and 1 is the number of mesh ids per pixel. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py new file mode 100644 index 00000000000..f5393920162 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py @@ -0,0 +1,71 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""Configuration for the ray-cast sensor.""" + +from dataclasses import MISSING + +from isaaclab.utils import configclass + +from .multi_mesh_ray_caster import MultiMeshRayCaster +from .ray_caster_cfg import RayCasterCfg + + +@configclass +class MultiMeshRayCasterCfg(RayCasterCfg): + """Configuration for the multi-mesh ray-cast sensor.""" + + @configclass + class RaycastTargetCfg: + """Configuration for different ray-cast targets.""" + + prim_expr: str = MISSING + """The regex to specify the target prim to ray cast against.""" + + is_shared: bool = False + """Whether the target prim is assumed to be the same mesh across all environments. Defaults to False. + + If True, only the first mesh is read and then reused for all environments, rather than re-parsed. + This provides a startup performance boost when there are many environments that all use the same asset. + + .. 
note:: + If :attr:`MultiMeshRayCasterCfg.reference_meshes` is False, this flag has no effect. + """ + + merge_prim_meshes: bool = True + """Whether to merge the parsed meshes for a prim that contains multiple meshes. Defaults to True. + + This will create a new mesh that combines all meshes in the parsed prim. The raycast hits mesh IDs + will then refer to the single merged mesh. + """ + + track_mesh_transforms: bool = True + """Whether the mesh transformations should be tracked. Defaults to True. + + .. note:: + Not tracking the mesh transformations is recommended when the meshes are static to increase performance. + """ + + class_type: type = MultiMeshRayCaster + + mesh_prim_paths: list[str | RaycastTargetCfg] = MISSING + """The list of mesh primitive paths to ray cast against. + + If an entry is a string, it is internally converted to :class:`RaycastTargetCfg` with + :attr:`~RaycastTargetCfg.track_mesh_transforms` disabled. These settings ensure backwards compatibility + with the default raycaster. + """ + + update_mesh_ids: bool = False + """Whether to update the mesh ids of the ray hits in the :attr:`data` container.""" + + reference_meshes: bool = True + """Whether to reference duplicated meshes instead of loading each one separately into memory. + Defaults to True. + + When enabled, the raycaster parses all meshes in all environments, but reuses references + for duplicates instead of storing multiple copies. This reduces memory footprint. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py new file mode 100644 index 00000000000..b9ae187591b --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py @@ -0,0 +1,22 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""Data container for the multi-mesh ray-cast sensor.""" + +import torch + +from .ray_caster_data import RayCasterData + + +class MultiMeshRayCasterData(RayCasterData): + """Data container for the multi-mesh ray-cast sensor.""" + + ray_mesh_ids: torch.Tensor = None + """The mesh ids of the ray hits. + + Shape is (N, B, 1), where N is the number of sensors, B is the number of rays + in the scan pattern per sensor. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py new file mode 100644 index 00000000000..d43f5437ce0 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for ray-casting patterns used by the ray-caster.""" + +from .patterns import bpearl_pattern, grid_pattern, lidar_pattern, pinhole_camera_pattern +from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, LidarPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py new file mode 100644 index 00000000000..d5255f64c75 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py @@ -0,0 +1,180 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import math +from typing import TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from . import patterns_cfg + + +def grid_pattern(cfg: patterns_cfg.GridPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: + """A regular grid pattern for ray casting. + + The grid pattern is made from rays that are parallel to each other. They span a 2D grid in the sensor's + local coordinates from ``(-length/2, -width/2)`` to ``(length/2, width/2)``, which is defined + by the ``size = (length, width)`` and ``resolution`` parameters in the config. + + Args: + cfg: The configuration instance for the pattern. + device: The device to create the pattern on. + + Returns: + The starting positions and directions of the rays. + + Raises: + ValueError: If the ordering is not "xy" or "yx". + ValueError: If the resolution is less than or equal to 0. + """ + # check valid arguments + if cfg.ordering not in ["xy", "yx"]: + raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{cfg.ordering}'.") + if cfg.resolution <= 0: + raise ValueError(f"Resolution must be greater than 0. Received: '{cfg.resolution}'.") + + # resolve mesh grid indexing (note: torch meshgrid is different from numpy meshgrid) + # check: https://github.com/pytorch/pytorch/issues/15301 + indexing = cfg.ordering if cfg.ordering == "xy" else "ij" + # define grid pattern + x = torch.arange(start=-cfg.size[0] / 2, end=cfg.size[0] / 2 + 1.0e-9, step=cfg.resolution, device=device) + y = torch.arange(start=-cfg.size[1] / 2, end=cfg.size[1] / 2 + 1.0e-9, step=cfg.resolution, device=device) + grid_x, grid_y = torch.meshgrid(x, y, indexing=indexing) + + # store into ray starts + num_rays = grid_x.numel() + ray_starts = torch.zeros(num_rays, 3, device=device) + ray_starts[:, 0] = grid_x.flatten() + ray_starts[:, 1] = grid_y.flatten() + + # define ray-cast directions + ray_directions = torch.zeros_like(ray_starts) + ray_directions[..., :] = torch.tensor(list(cfg.direction), device=device) + + return ray_starts, ray_directions + + +def pinhole_camera_pattern( + cfg: patterns_cfg.PinholeCameraPatternCfg, intrinsic_matrices: torch.Tensor, device: str +) -> tuple[torch.Tensor, torch.Tensor]: + """The image pattern for ray casting. + + .. caution:: + This function does not follow the standard pattern interface. It requires the intrinsic matrices + of the cameras to be passed in. This is because we want to be able to randomize the intrinsic + matrices of the cameras, which is not possible with the standard pattern interface. + + Args: + cfg: The configuration instance for the pattern. + intrinsic_matrices: The intrinsic matrices of the cameras. Shape is (N, 3, 3). 
+ device: The device to create the pattern on. + + Returns: + The starting positions and directions of the rays. The shape of the tensors are + (N, H * W, 3) and (N, H * W, 3) respectively. + """ + # get image plane mesh grid + grid = torch.meshgrid( + torch.arange(start=0, end=cfg.width, dtype=torch.int32, device=device), + torch.arange(start=0, end=cfg.height, dtype=torch.int32, device=device), + indexing="xy", + ) + pixels = torch.vstack(list(map(torch.ravel, grid))).T + # convert to homogeneous coordinate system + pixels = torch.hstack([pixels, torch.ones((len(pixels), 1), device=device)]) + # move each pixel coordinate to the center of the pixel + pixels += torch.tensor([[0.5, 0.5, 0]], device=device) + # get pixel coordinates in camera frame + pix_in_cam_frame = torch.matmul(torch.inverse(intrinsic_matrices), pixels.T) + + # robotics camera frame is (x forward, y left, z up) from camera frame with (x right, y down, z forward) + # transform to robotics camera frame + transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2) + pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec + # normalize ray directions + ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) + # for camera, we always ray-cast from the sensor's origin + ray_starts = torch.zeros_like(ray_directions, device=device) + + return ray_starts, ray_directions + + +def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: + """The RS-Bpearl pattern for ray casting. + + The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide + field of view. It is designed for near-field blind-spots detection. + + .. _Robosense RS-Bpearl: https://www.roscomponents.com/product/rs-bpearl/ + + Args: + cfg: The configuration instance for the pattern. + device: The device to create the pattern on. + + Returns: + The starting positions and directions of the rays. + """ + h = torch.arange(-cfg.horizontal_fov / 2, cfg.horizontal_fov / 2, cfg.horizontal_res, device=device) + v = torch.tensor(list(cfg.vertical_ray_angles), device=device) + + pitch, yaw = torch.meshgrid(v, h, indexing="xy") + pitch, yaw = torch.deg2rad(pitch.reshape(-1)), torch.deg2rad(yaw.reshape(-1)) + pitch += torch.pi / 2 + x = torch.sin(pitch) * torch.cos(yaw) + y = torch.sin(pitch) * torch.sin(yaw) + z = torch.cos(pitch) + + ray_directions = -torch.stack([x, y, z], dim=1) + ray_starts = torch.zeros_like(ray_directions) + return ray_starts, ray_directions + + +def lidar_pattern(cfg: patterns_cfg.LidarPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: + """Lidar sensor pattern for ray casting. + + Args: + cfg: The configuration instance for the pattern. + device: The device to create the pattern on. + + Returns: + The starting positions and directions of the rays. 
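+
+    Example:
+        A minimal usage sketch (the parameter values below are illustrative only):
+
+        .. code-block:: python
+
+            cfg = patterns_cfg.LidarPatternCfg(
+                channels=16,
+                vertical_fov_range=(-15.0, 15.0),
+                horizontal_fov_range=(0.0, 360.0),
+                horizontal_res=1.0,
+            )
+            ray_starts, ray_directions = lidar_pattern(cfg, device="cpu")
+            # ray_starts is all zeros; ray_directions holds one unit vector per (channel, azimuth) pair.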
+ """ + # Vertical angles + vertical_angles = torch.linspace(cfg.vertical_fov_range[0], cfg.vertical_fov_range[1], cfg.channels) + + # If the horizontal field of view is 360 degrees, exclude the last point to avoid overlap + if abs(abs(cfg.horizontal_fov_range[0] - cfg.horizontal_fov_range[1]) - 360.0) < 1e-6: + up_to = -1 + else: + up_to = None + + # Horizontal angles + num_horizontal_angles = math.ceil((cfg.horizontal_fov_range[1] - cfg.horizontal_fov_range[0]) / cfg.horizontal_res) + horizontal_angles = torch.linspace(cfg.horizontal_fov_range[0], cfg.horizontal_fov_range[1], num_horizontal_angles)[ + :up_to + ] + + # Convert degrees to radians + vertical_angles_rad = torch.deg2rad(vertical_angles) + horizontal_angles_rad = torch.deg2rad(horizontal_angles) + + # Meshgrid to create a 2D array of angles + v_angles, h_angles = torch.meshgrid(vertical_angles_rad, horizontal_angles_rad, indexing="ij") + + # Spherical to Cartesian conversion (assuming Z is up) + x = torch.cos(v_angles) * torch.cos(h_angles) + y = torch.cos(v_angles) * torch.sin(h_angles) + z = torch.sin(v_angles) + + # Ray directions + ray_directions = torch.stack([x, y, z], dim=-1).reshape(-1, 3).to(device) + + # Ray starts: Assuming all rays originate from (0,0,0) + ray_starts = torch.zeros_like(ray_directions).to(device) + + return ray_starts, ray_directions diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py new file mode 100644 index 00000000000..f50ba272b70 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py @@ -0,0 +1,219 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the ray-cast sensor.""" + +from __future__ import annotations + +from collections.abc import Callable, Sequence +from dataclasses import MISSING +from typing import Literal + +import torch + +from isaaclab.utils import configclass + +from . import patterns + + +@configclass +class PatternBaseCfg: + """Base configuration for a pattern.""" + + func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING + """Function to generate the pattern. + + The function should take in the configuration and the device name as arguments. It should return + the pattern's starting positions and directions as a tuple of torch.Tensor. + """ + + +@configclass +class GridPatternCfg(PatternBaseCfg): + """Configuration for the grid pattern for ray-casting. + + Defines a 2D grid of rays in the coordinates of the sensor. + + .. attention:: + The points are ordered based on the :attr:`ordering` attribute. + + """ + + func: Callable = patterns.grid_pattern + + resolution: float = MISSING + """Grid resolution (in meters).""" + + size: tuple[float, float] = MISSING + """Grid size (length, width) (in meters).""" + + direction: tuple[float, float, float] = (0.0, 0.0, -1.0) + """Ray direction. Defaults to (0.0, 0.0, -1.0).""" + + ordering: Literal["xy", "yx"] = "xy" + """Specifies the ordering of points in the generated grid. Defaults to ``"xy"``. + + Consider a grid pattern with points at :math:`(x, y)` where :math:`x` and :math:`y` are the grid indices. + The ordering of the points can be specified as "xy" or "yx". This determines the inner and outer loop order + when iterating over the grid points. 
+ + * If "xy" is selected, the points are ordered with inner loop over "x" and outer loop over "y". + * If "yx" is selected, the points are ordered with inner loop over "y" and outer loop over "x". + + For example, the grid pattern points with :math:`X = (0, 1, 2)` and :math:`Y = (3, 4)`: + + * "xy" ordering: :math:`[(0, 3), (1, 3), (2, 3), (1, 4), (2, 4), (2, 4)]` + * "yx" ordering: :math:`[(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)]` + """ + + +@configclass +class PinholeCameraPatternCfg(PatternBaseCfg): + """Configuration for a pinhole camera depth image pattern for ray-casting. + + .. caution:: + Focal length as well as the aperture sizes and offsets are set as a tenth of the world unit. In our case, the + world unit is meters, so all of these values are in cm. For more information, please check: + https://docs.omniverse.nvidia.com/materials-and-rendering/latest/cameras.html + """ + + func: Callable = patterns.pinhole_camera_pattern + + focal_length: float = 24.0 + """Perspective focal length (in cm). Defaults to 24.0cm. + + Longer lens lengths narrower FOV, shorter lens lengths wider FOV. + """ + + horizontal_aperture: float = 20.955 + """Horizontal aperture (in cm). Defaults to 20.955 cm. + + Emulates sensor/film width on a camera. + + Note: + The default value is the horizontal aperture of a 35 mm spherical projector. + """ + vertical_aperture: float | None = None + r"""Vertical aperture (in cm). Defaults to None. + + Emulates sensor/film height on a camera. If None, then the vertical aperture is calculated based on the + horizontal aperture and the aspect ratio of the image to maintain squared pixels. In this case, the vertical + aperture is calculated as: + + .. math:: + \text{vertical aperture} = \text{horizontal aperture} \times \frac{\text{height}}{\text{width}} + """ + + horizontal_aperture_offset: float = 0.0 + """Offsets Resolution/Film gate horizontally. Defaults to 0.0.""" + + vertical_aperture_offset: float = 0.0 + """Offsets Resolution/Film gate vertically. Defaults to 0.0.""" + + width: int = MISSING + """Width of the image (in pixels).""" + + height: int = MISSING + """Height of the image (in pixels).""" + + @classmethod + def from_intrinsic_matrix( + cls, + intrinsic_matrix: list[float], + width: int, + height: int, + focal_length: float = 24.0, + ) -> PinholeCameraPatternCfg: + r"""Create a :class:`PinholeCameraPatternCfg` class instance from an intrinsic matrix. + + The intrinsic matrix is a 3x3 matrix that defines the mapping between the 3D world coordinates and + the 2D image. The matrix is defined as: + + .. math:: + I_{cam} = \begin{bmatrix} + f_x & 0 & c_x \\ + 0 & f_y & c_y \\ + 0 & 0 & 1 + \end{bmatrix}, + + where :math:`f_x` and :math:`f_y` are the focal length along x and y direction, while + :math:`c_x` and :math:`c_y` are the principle point offsets along x and y direction, respectively. + + Args: + intrinsic_matrix: Intrinsic matrix of the camera in row-major format. + The matrix is defined as [f_x, 0, c_x, 0, f_y, c_y, 0, 0, 1]. Shape is (9,). + width: Width of the image (in pixels). + height: Height of the image (in pixels). + focal_length: Focal length of the camera (in cm). Defaults to 24.0 cm. + + Returns: + An instance of the :class:`PinholeCameraPatternCfg` class. 
+ """ + # extract parameters from matrix + f_x = intrinsic_matrix[0] + c_x = intrinsic_matrix[2] + f_y = intrinsic_matrix[4] + c_y = intrinsic_matrix[5] + # resolve parameters for usd camera + horizontal_aperture = width * focal_length / f_x + vertical_aperture = height * focal_length / f_y + horizontal_aperture_offset = (c_x - width / 2) / f_x + vertical_aperture_offset = (c_y - height / 2) / f_y + + return cls( + focal_length=focal_length, + horizontal_aperture=horizontal_aperture, + vertical_aperture=vertical_aperture, + horizontal_aperture_offset=horizontal_aperture_offset, + vertical_aperture_offset=vertical_aperture_offset, + width=width, + height=height, + ) + + +@configclass +class BpearlPatternCfg(PatternBaseCfg): + """Configuration for the Bpearl pattern for ray-casting.""" + + func: Callable = patterns.bpearl_pattern + + horizontal_fov: float = 360.0 + """Horizontal field of view (in degrees). Defaults to 360.0.""" + + horizontal_res: float = 10.0 + """Horizontal resolution (in degrees). Defaults to 10.0.""" + + # fmt: off + vertical_ray_angles: Sequence[float] = [ + 89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375, + 58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375, + 27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125 + ] + # fmt: on + """Vertical ray angles (in degrees). Defaults to a list of 32 angles. + + Note: + We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles + are not evenly spaced. + """ + + +@configclass +class LidarPatternCfg(PatternBaseCfg): + """Configuration for the LiDAR pattern for ray-casting.""" + + func: Callable = patterns.lidar_pattern + + channels: int = MISSING + """Number of Channels (Beams). Determines the vertical resolution of the LiDAR sensor.""" + + vertical_fov_range: tuple[float, float] = MISSING + """Vertical field of view range in degrees.""" + + horizontal_fov_range: tuple[float, float] = MISSING + """Horizontal field of view range in degrees.""" + + horizontal_res: float = MISSING + """Horizontal resolution (in degrees).""" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py new file mode 100644 index 00000000000..543276e8ea2 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py @@ -0,0 +1,51 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utility functions for ray-cast sensors.""" + +from __future__ import annotations + +import torch + +import omni.physics.tensors.impl.api as physx + +from isaaclab.sim.views import XformPrimView +from isaaclab.utils.math import convert_quat + + +def obtain_world_pose_from_view( + physx_view: XformPrimView | physx.ArticulationView | physx.RigidBodyView, + env_ids: torch.Tensor, + clone: bool = False, +) -> tuple[torch.Tensor, torch.Tensor]: + """Get the world poses of the prim referenced by the prim view. + + Args: + physx_view: The prim view to get the world poses from. + env_ids: The environment ids of the prims to get the world poses for. + clone: Whether to clone the returned tensors (default: False). + + Returns: + A tuple containing the world positions and orientations of the prims. + Orientation is in (w, x, y, z) format. 
+ + Raises: + NotImplementedError: If the prim view is not of the supported type. + """ + if isinstance(physx_view, XformPrimView): + pos_w, quat_w = physx_view.get_world_poses(env_ids) + elif isinstance(physx_view, physx.ArticulationView): + pos_w, quat_w = physx_view.get_root_transforms()[env_ids].split([3, 4], dim=-1) + quat_w = convert_quat(quat_w, to="wxyz") + elif isinstance(physx_view, physx.RigidBodyView): + pos_w, quat_w = physx_view.get_transforms()[env_ids].split([3, 4], dim=-1) + quat_w = convert_quat(quat_w, to="wxyz") + else: + raise NotImplementedError(f"Cannot get world poses for prim view of type '{type(physx_view)}'.") + + if clone: + return pos_w.clone(), quat_w.clone() + else: + return pos_w, quat_w diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py new file mode 100644 index 00000000000..e6735a9f481 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py @@ -0,0 +1,428 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING, ClassVar + +import numpy as np +import torch +import warp as wp + +import omni +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdGeom, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sim.views import XformPrimView +from isaaclab.terrains.trimesh.utils import make_plane +from isaaclab.utils.math import quat_apply, quat_apply_yaw +from isaaclab.utils.warp import convert_to_warp_mesh, raycast_mesh + +from ..sensor_base import SensorBase +from .ray_cast_utils import obtain_world_pose_from_view +from .ray_caster_data import RayCasterData + +if TYPE_CHECKING: + from .ray_caster_cfg import RayCasterCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RayCaster(SensorBase): + """A ray-casting sensor. + + The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are + defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against + a set of meshes with a given ray pattern. + + The meshes are parsed from the list of primitive paths provided in the configuration. These are then + converted to warp meshes and stored in the `warp_meshes` list. The ray-caster then ray-casts against + these warp meshes using the ray pattern provided in the configuration. + + .. note:: + Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes + is a work in progress. + """ + + cfg: RayCasterCfg + """The configuration parameters.""" + + # Class variables to share meshes across instances + meshes: ClassVar[dict[str, wp.Mesh]] = {} + """A dictionary to store warp meshes for raycasting, shared across all instances. + + The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.""" + _instance_count: ClassVar[int] = 0 + """A counter to track the number of RayCaster instances, used to manage class variable lifecycle.""" + + def __init__(self, cfg: RayCasterCfg): + """Initializes the ray-caster object. + + Args: + cfg: The configuration parameters. 
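+
+        Raises:
+            RuntimeError: If the leaf of the sensor prim path contains a regex pattern.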
+ """ + RayCaster._instance_count += 1 + # check if sensor path is valid + # note: currently we do not handle environment indices if there is a regex pattern in the leaf + # For example, if the prim path is "/World/Sensor_[1,2]". + sensor_path = cfg.prim_path.split("/")[-1] + sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None + if sensor_path_is_regex: + raise RuntimeError( + f"Invalid prim path for the ray-caster sensor: {cfg.prim_path}." + "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." + ) + # Initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data = RayCasterData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Ray-caster @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of meshes : {len(RayCaster.meshes)}\n" + f"\tnumber of sensors : {self._view.count}\n" + f"\tnumber of rays/sensor: {self.num_rays}\n" + f"\ttotal number of rays : {self.num_rays * self._view.count}" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self._view.count + + @property + def data(self) -> RayCasterData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + """ + Operations. + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + num_envs_ids = self._view.count + else: + num_envs_ids = len(env_ids) + # resample the drift + r = torch.empty(num_envs_ids, 3, device=self.device) + self.drift[env_ids] = r.uniform_(*self.cfg.drift_range) + # resample the height drift + range_list = [self.cfg.ray_cast_drift_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] + ranges = torch.tensor(range_list, device=self.device) + self.ray_cast_drift[env_ids] = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (num_envs_ids, 3), device=self.device + ) + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + # obtain global simulation view + + self._physics_sim_view = SimulationManager.get_physics_sim_view() + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + available_prims = ",".join([str(p.GetPath()) for p in sim_utils.get_current_stage().Traverse()]) + raise RuntimeError( + f"Failed to find a prim at path expression: {self.cfg.prim_path}. Available prims: {available_prims}" + ) + + self._view, self._offset = self._obtain_trackable_prim_view(self.cfg.prim_path) + + # load the meshes by parsing the stage + self._initialize_warp_meshes() + # initialize the ray start and directions + self._initialize_rays_impl() + + def _initialize_warp_meshes(self): + # check number of mesh prims provided + if len(self.cfg.mesh_prim_paths) != 1: + raise NotImplementedError( + f"RayCaster currently only supports one mesh prim. 
Received: {len(self.cfg.mesh_prim_paths)}" + ) + + # read prims to ray-cast + for mesh_prim_path in self.cfg.mesh_prim_paths: + # check if mesh already casted into warp mesh + if mesh_prim_path in RayCaster.meshes: + continue + + # check if the prim is a plane - handle PhysX plane as a special case + # if a plane exists then we need to create an infinite mesh that is a plane + mesh_prim = sim_utils.get_first_matching_child_prim( + mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane" + ) + # if we did not find a plane then we need to read the mesh + if mesh_prim is None: + # obtain the mesh prim + mesh_prim = sim_utils.get_first_matching_child_prim( + mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh" + ) + # check if valid + if mesh_prim is None or not mesh_prim.IsValid(): + raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}") + # cast into UsdGeomMesh + mesh_prim = UsdGeom.Mesh(mesh_prim) + # read the vertices and faces + points = np.asarray(mesh_prim.GetPointsAttr().Get()) + transform_matrix = np.array(omni.usd.get_world_transform_matrix(mesh_prim)).T + points = np.matmul(points, transform_matrix[:3, :3].T) + points += transform_matrix[:3, 3] + indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get()) + wp_mesh = convert_to_warp_mesh(points, indices, device=self.device) + # print info + logger.info( + f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces." + ) + else: + mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True) + wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device) + # print info + logger.info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.") + # add the warp mesh to the list + RayCaster.meshes[mesh_prim_path] = wp_mesh + + # throw an error if no meshes are found + if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]): + raise RuntimeError( + f"No meshes found for ray-casting! 
Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" + ) + + def _initialize_rays_impl(self): + # compute ray stars and directions + self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device) + self.num_rays = len(self.ray_directions) + # apply offset transformation to the rays + offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device) + offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device) + self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions) + self.ray_starts += offset_pos + # repeat the rays for each sensor + self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1) + self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1) + # prepare drift + self.drift = torch.zeros(self._view.count, 3, device=self.device) + self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) + # fill the data buffer + self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) + self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) + self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) + + def _update_ray_infos(self, env_ids: Sequence[int]): + """Updates the ray information buffers.""" + + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) + pos_w, quat_w = math_utils.combine_frame_transforms( + pos_w, quat_w, self._offset[0][env_ids], self._offset[1][env_ids] + ) + # apply drift to ray starting position in world frame + pos_w += self.drift[env_ids] + # store the poses + self._data.pos_w[env_ids] = pos_w + self._data.quat_w[env_ids] = quat_w + + # check if user provided attach_yaw_only flag + if self.cfg.attach_yaw_only is not None: + msg = ( + "Raycaster attribute 'attach_yaw_only' property will be deprecated in a future release." + " Please use the parameter 'ray_alignment' instead." + ) + # set ray alignment to yaw + if self.cfg.attach_yaw_only: + self.cfg.ray_alignment = "yaw" + msg += " Setting ray_alignment to 'yaw'." + else: + self.cfg.ray_alignment = "base" + msg += " Setting ray_alignment to 'base'." 
+ # log the warning + logger.warning(msg) + # ray cast based on the sensor poses + if self.cfg.ray_alignment == "world": + # apply horizontal drift to ray starting position in ray caster frame + pos_w[:, 0:2] += self.ray_cast_drift[env_ids, 0:2] + # no rotation is considered and directions are not rotated + ray_starts_w = self.ray_starts[env_ids] + ray_starts_w += pos_w.unsqueeze(1) + ray_directions_w = self.ray_directions[env_ids] + elif self.cfg.ray_alignment == "yaw": + # apply horizontal drift to ray starting position in ray caster frame + pos_w[:, 0:2] += quat_apply_yaw(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] + # only yaw orientation is considered and directions are not rotated + ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) + ray_starts_w += pos_w.unsqueeze(1) + ray_directions_w = self.ray_directions[env_ids] + elif self.cfg.ray_alignment == "base": + # apply horizontal drift to ray starting position in ray caster frame + pos_w[:, 0:2] += quat_apply(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] + # full orientation is considered + ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) + ray_starts_w += pos_w.unsqueeze(1) + ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) + else: + raise RuntimeError(f"Unsupported ray_alignment type: {self.cfg.ray_alignment}.") + + self._ray_starts_w[env_ids] = ray_starts_w + self._ray_directions_w[env_ids] = ray_directions_w + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + self._update_ray_infos(env_ids) + + # ray cast and store the hits + # TODO: Make this work for multiple meshes? + self._data.ray_hits_w[env_ids] = raycast_mesh( + self._ray_starts_w[env_ids], + self._ray_directions_w[env_ids], + max_dist=self.cfg.max_distance, + mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], + )[0] + + # apply vertical drift to ray starting position in ray caster frame + self._data.ray_hits_w[env_ids, :, 2] += self.ray_cast_drift[env_ids, 2].unsqueeze(-1) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. not their visibility + if debug_vis: + if not hasattr(self, "ray_visualizer"): + self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.ray_visualizer.set_visibility(True) + else: + if hasattr(self, "ray_visualizer"): + self.ray_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + if self._data.ray_hits_w is None: + return + # remove possible inf values + viz_points = self._data.ray_hits_w.reshape(-1, 3) + viz_points = viz_points[~torch.any(torch.isinf(viz_points), dim=1)] + + self.ray_visualizer.visualize(viz_points) + + """ + Internal Helpers. + """ + + def _obtain_trackable_prim_view( + self, target_prim_path: str + ) -> tuple[XformPrimView | any, tuple[torch.Tensor, torch.Tensor]]: + """Obtain a prim view that can be used to track the pose of the parget prim. + + The target prim path is a regex expression that matches one or more mesh prims. While we can track its + pose directly using XFormPrim, this is not efficient and can be slow. Instead, we create a prim view + using the physics simulation view, which provides a more efficient way to track the pose of the mesh prims. + + The function additionally resolves the relative pose between the mesh and its corresponding physics prim. 
+ This is especially useful if the mesh is not directly parented to the physics prim. + + Args: + target_prim_path: The target prim path to obtain the prim view for. + + Returns: + A tuple containing: + + - An XFormPrim or a physics prim view (ArticulationView or RigidBodyView). + - A tuple containing the positions and orientations of the mesh prims in the physics prim frame. + + """ + + mesh_prim = sim_utils.find_first_matching_prim(target_prim_path) + current_prim = mesh_prim + current_path_expr = target_prim_path + + prim_view = None + + while prim_view is None: + # TODO: Need to handle the case where API is present but it is disabled + if current_prim.HasAPI(UsdPhysics.ArticulationRootAPI): + prim_view = self._physics_sim_view.create_articulation_view(current_path_expr.replace(".*", "*")) + logger.info(f"Created articulation view for mesh prim at path: {target_prim_path}") + break + + # TODO: Need to handle the case where API is present but it is disabled + if current_prim.HasAPI(UsdPhysics.RigidBodyAPI): + prim_view = self._physics_sim_view.create_rigid_body_view(current_path_expr.replace(".*", "*")) + logger.info(f"Created rigid body view for mesh prim at path: {target_prim_path}") + break + + new_root_prim = current_prim.GetParent() + current_path_expr = current_path_expr.rsplit("/", 1)[0] + if not new_root_prim.IsValid(): + prim_view = XformPrimView(target_prim_path, device=self._device, stage=self.stage) + current_path_expr = target_prim_path + logger.warning( + f"The prim at path {target_prim_path} which is used for raycasting is not a physics prim." + " Defaulting to XFormPrim. \n The pose of the mesh will most likely not" + " be updated correctly when running in headless mode and position lookups will be much slower. \n" + " If possible, ensure that the mesh or its parent is a physics prim (rigid body or articulation)." + ) + break + + # switch the current prim to the parent prim + current_prim = new_root_prim + + # obtain the relative transforms between target prim and the view prims + mesh_prims = sim_utils.find_matching_prims(target_prim_path) + view_prims = sim_utils.find_matching_prims(current_path_expr) + if len(mesh_prims) != len(view_prims): + raise RuntimeError( + f"The number of mesh prims ({len(mesh_prims)}) does not match the number of physics prims" + f" ({len(view_prims)})Please specify the correct mesh and physics prim paths more" + " specifically in your target expressions." + ) + positions = [] + quaternions = [] + for mesh_prim, view_prim in zip(mesh_prims, view_prims): + pos, orientation = sim_utils.resolve_prim_pose(mesh_prim, view_prim) + positions.append(torch.tensor(pos, dtype=torch.float32, device=self.device)) + quaternions.append(torch.tensor(orientation, dtype=torch.float32, device=self.device)) + + positions = torch.stack(positions).to(device=self.device, dtype=torch.float32) + quaternions = torch.stack(quaternions).to(device=self.device, dtype=torch.float32) + + return prim_view, (positions, quaternions) + + """ + Internal simulation callbacks. 
+ """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._view = None + + def __del__(self): + RayCaster._instance_count -= 1 + if RayCaster._instance_count == 0: + RayCaster.meshes.clear() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py new file mode 100644 index 00000000000..e930d3df183 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py @@ -0,0 +1,456 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +from collections.abc import Sequence +from typing import TYPE_CHECKING, ClassVar, Literal + +import torch + +from pxr import UsdGeom + +import isaaclab.utils.math as math_utils +from isaaclab.sensors.camera import CameraData +from isaaclab.utils.warp import raycast_mesh + +from .ray_cast_utils import obtain_world_pose_from_view +from .ray_caster import RayCaster + +if TYPE_CHECKING: + from .ray_caster_camera_cfg import RayCasterCameraCfg + +# import logger +logger = logging.getLogger(__name__) + + +class RayCasterCamera(RayCaster): + """A ray-casting camera sensor. + + The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are + defined in the sensor's local coordinate frame. The sensor has the same interface as the + :class:`isaaclab.sensors.Camera` that implements the camera class through USD camera prims. + However, this class provides a faster image generation. The sensor converts meshes from the list of + primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these + Warp meshes only. + + Currently, only the following annotators are supported: + + - ``"distance_to_camera"``: An image containing the distance to camera optical center. + - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. + - ``"normals"``: An image containing the local surface normal vectors at each pixel. + + .. note:: + Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes + is a work in progress. + """ + + cfg: RayCasterCameraCfg + """The configuration parameters.""" + UNSUPPORTED_TYPES: ClassVar[set[str]] = { + "rgb", + "instance_id_segmentation", + "instance_id_segmentation_fast", + "instance_segmentation", + "instance_segmentation_fast", + "semantic_segmentation", + "skeleton_data", + "motion_vectors", + "bounding_box_2d_tight", + "bounding_box_2d_tight_fast", + "bounding_box_2d_loose", + "bounding_box_2d_loose_fast", + "bounding_box_3d", + "bounding_box_3d_fast", + } + """A set of sensor types that are not supported by the ray-caster camera.""" + + def __init__(self, cfg: RayCasterCameraCfg): + """Initializes the camera object. + + Args: + cfg: The configuration parameters. + + Raises: + ValueError: If the provided data types are not supported by the ray-caster camera. 
+ """ + # perform check on supported data types + self._check_supported_data_types(cfg) + # initialize base class + super().__init__(cfg) + # create empty variables for storing output data + self._data = CameraData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of meshes : {len(RayCaster.meshes)}\n" + f"\tnumber of sensors : {self._view.count}\n" + f"\tnumber of rays/sensor: {self.num_rays}\n" + f"\ttotal number of rays : {self.num_rays * self._view.count}\n" + f"\timage shape : {self.image_shape}" + ) + + """ + Properties + """ + + @property + def data(self) -> CameraData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def image_shape(self) -> tuple[int, int]: + """A tuple containing (height, width) of the camera sensor.""" + return (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) + + @property + def frame(self) -> torch.tensor: + """Frame number when the measurement took place.""" + return self._frame + + """ + Operations. + """ + + def set_intrinsic_matrices( + self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None + ): + """Set the intrinsic matrix of the camera. + + Args: + matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). + focal_length: Focal length to use when computing aperture values (in cm). Defaults to 1.0. + env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. + """ + # resolve env_ids + if env_ids is None: + env_ids = slice(None) + # save new intrinsic matrices and focal length + self._data.intrinsic_matrices[env_ids] = matrices.to(self._device) + self._focal_length = focal_length + # recompute ray directions + self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func( + self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device + ) + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timestamps + super().reset(env_ids) + # resolve None + if env_ids is None or isinstance(env_ids, slice): + env_ids = self._ALL_INDICES + # reset the data + # note: this recomputation is useful if one performs events such as randomizations on the camera poses. + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + pos_w, quat_w = math_utils.combine_frame_transforms( + pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + ) + self._data.pos_w[env_ids] = pos_w + self._data.quat_w_world[env_ids] = quat_w + # Reset the frame count + self._frame[env_ids] = 0 + + def set_world_poses( + self, + positions: torch.Tensor | None = None, + orientations: torch.Tensor | None = None, + env_ids: Sequence[int] | None = None, + convention: Literal["opengl", "ros", "world"] = "ros", + ): + """Set the pose of the camera w.r.t. the world frame using specified convention. + + Since different fields use different conventions for camera orientations, the method allows users to + set the camera poses in the specified convention. 
+        Possible conventions are:
+
+        - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention
+        - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention
+        - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention
+
+        See :func:`isaaclab.utils.math.convert_camera_frame_orientation_convention` for more details
+        on the conventions.
+
+        Args:
+            positions: The Cartesian coordinates (in meters). Shape is (N, 3).
+                Defaults to None, in which case the camera position is not changed.
+            orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4).
+                Defaults to None, in which case the camera orientation is not changed.
+            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
+            convention: The convention in which the poses are fed. Defaults to "ros".
+
+        Raises:
+            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
+        """
+        # resolve env_ids
+        if env_ids is None or isinstance(env_ids, slice):
+            env_ids = self._ALL_INDICES
+
+        # get current positions
+        pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids)
+        if positions is not None:
+            # transform to camera frame
+            pos_offset_world_frame = positions - pos_w
+            self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame)
+        if orientations is not None:
+            # convert rotation matrix from input convention to world
+            quat_w_set = math_utils.convert_camera_frame_orientation_convention(
+                orientations, origin=convention, target="world"
+            )
+            self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set)
+
+        # update the data
+        pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True)
+        pos_w, quat_w = math_utils.combine_frame_transforms(
+            pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids]
+        )
+        self._data.pos_w[env_ids] = pos_w
+        self._data.quat_w_world[env_ids] = quat_w
+
+    def set_world_poses_from_view(
+        self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None
+    ):
+        """Set the poses of the camera from the eye position and look-at target position.
+
+        Args:
+            eyes: The positions of the camera's eye. Shape is (N, 3).
+            targets: The target locations to look at. Shape is (N, 3).
+            env_ids: The sensor ids to manipulate. Defaults to None, which means all sensor indices.
+
+        Raises:
+            RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first.
+            NotImplementedError: If the stage up-axis is not "Y" or "Z".
+        """
+        # get up axis of current stage
+        up_axis = UsdGeom.GetStageUpAxis(self.stage)
+        # camera position and rotation in opengl convention
+        orientations = math_utils.quat_from_matrix(
+            math_utils.create_rotation_matrix_from_view(eyes, targets, up_axis=up_axis, device=self._device)
+        )
+        self.set_world_poses(eyes, orientations, env_ids, convention="opengl")
+
+    """
+    Implementation.
+ """ + + def _initialize_rays_impl(self): + # Create all indices buffer + self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) + # Create frame count buffer + self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) + # create buffers + self._create_buffers() + # compute intrinsic matrices + self._compute_intrinsic_matrices() + # compute ray stars and directions + self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( + self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device + ) + self.num_rays = self.ray_directions.shape[1] + # create buffer to store ray hits + self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) + # set offsets + quat_w = math_utils.convert_camera_frame_orientation_convention( + torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" + ) + self._offset_quat = quat_w.repeat(self._view.count, 1) + self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # increment frame count + self._frame[env_ids] += 1 + + # compute poses from current view + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + pos_w, quat_w = math_utils.combine_frame_transforms( + pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + ) + # update the data + self._data.pos_w[env_ids] = pos_w + self._data.quat_w_world[env_ids] = quat_w + + # note: full orientation is considered + ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) + ray_starts_w += pos_w.unsqueeze(1) + ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) + + # ray cast and store the hits + # note: we set max distance to 1e6 during the ray-casting. THis is because we clip the distance + # to the image plane and distance to the camera to the maximum distance afterwards in-order to + # match the USD camera behavior. + + # TODO: Make ray-casting work for multiple meshes? + # necessary for regular dictionaries. 
+ self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh( + ray_starts_w, + ray_directions_w, + mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], + max_dist=1e6, + return_distance=any( + [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] + ), + return_normal="normals" in self.cfg.data_types, + ) + # update output buffers + if "distance_to_image_plane" in self.cfg.data_types: + # note: data is in camera frame so we only take the first component (z-axis of camera frame) + distance_to_image_plane = ( + math_utils.quat_apply( + math_utils.quat_inv(quat_w).repeat(1, self.num_rays), + (ray_depth[:, :, None] * ray_directions_w), + ) + )[:, :, 0] + # apply the maximum distance after the transformation + if self.cfg.depth_clipping_behavior == "max": + distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) + distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance + elif self.cfg.depth_clipping_behavior == "zero": + distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 + distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 + self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( + -1, *self.image_shape, 1 + ) + + if "distance_to_camera" in self.cfg.data_types: + if self.cfg.depth_clipping_behavior == "max": + ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) + elif self.cfg.depth_clipping_behavior == "zero": + ray_depth[ray_depth > self.cfg.max_distance] = 0.0 + self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) + + if "normals" in self.cfg.data_types: + self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) + + def _debug_vis_callback(self, event): + # in case it crashes be safe + if not hasattr(self, "ray_hits_w"): + return + # show ray hit positions + self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3)) + + """ + Private Helpers + """ + + def _check_supported_data_types(self, cfg: RayCasterCameraCfg): + """Checks if the data types are supported by the ray-caster camera.""" + # check if there is any intersection in unsupported types + # reason: we cannot obtain this data from simplified warp-based ray caster + common_elements = set(cfg.data_types) & RayCasterCamera.UNSUPPORTED_TYPES + if common_elements: + raise ValueError( + f"RayCasterCamera class does not support the following sensor types: {common_elements}." + "\n\tThis is because these sensor types cannot be obtained in a fast way using ''warp''." + "\n\tHint: If you need to work with these sensor types, we recommend using the USD camera" + " interface from the isaaclab.sensors.camera module." + ) + + def _create_buffers(self): + """Create buffers for storing data.""" + # prepare drift + self.drift = torch.zeros(self._view.count, 3, device=self.device) + self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) + # create the data object + # -- pose of the cameras + self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) + self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) + # -- intrinsic matrix + self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) + self._data.intrinsic_matrices[:, 2, 2] = 1.0 + self._data.image_shape = self.image_shape + # -- output data + # create the buffers to store the annotator data. 
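+        # note: only depth and normal annotators are supported; each buffer is allocated per sensor with
+        # shape (height, width, channels), where channels is 1 for depth and 3 for normals.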
+ self._data.output = {} + self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count + for name in self.cfg.data_types: + if name in ["distance_to_image_plane", "distance_to_camera"]: + shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 1) + elif name in ["normals"]: + shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 3) + else: + raise ValueError(f"Received unknown data type: {name}. Please check the configuration.") + # allocate tensor to store the data + self._data.output[name] = torch.zeros((self._view.count, *shape), device=self._device) + + def _compute_intrinsic_matrices(self): + """Computes the intrinsic matrices for the camera based on the config provided.""" + # get the sensor properties + pattern_cfg = self.cfg.pattern_cfg + + # check if vertical aperture is provided + # if not then it is auto-computed based on the aspect ratio to preserve squared pixels + if pattern_cfg.vertical_aperture is None: + pattern_cfg.vertical_aperture = pattern_cfg.horizontal_aperture * pattern_cfg.height / pattern_cfg.width + + # compute the intrinsic matrix + f_x = pattern_cfg.width * pattern_cfg.focal_length / pattern_cfg.horizontal_aperture + f_y = pattern_cfg.height * pattern_cfg.focal_length / pattern_cfg.vertical_aperture + c_x = pattern_cfg.horizontal_aperture_offset * f_x + pattern_cfg.width / 2 + c_y = pattern_cfg.vertical_aperture_offset * f_y + pattern_cfg.height / 2 + # allocate the intrinsic matrices + self._data.intrinsic_matrices[:, 0, 0] = f_x + self._data.intrinsic_matrices[:, 0, 2] = c_x + self._data.intrinsic_matrices[:, 1, 1] = f_y + self._data.intrinsic_matrices[:, 1, 2] = c_y + + # save focal length + self._focal_length = pattern_cfg.focal_length + + def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: + """Obtains the pose of the view the camera is attached to in the world frame. + + .. deprecated v2.3.1: + This function will be removed in a future release in favor of implementation + :meth:`obtain_world_pose_from_view`. + + Returns: + A tuple of the position (in meters) and quaternion (w, x, y, z). + + + """ + # deprecation + logger.warning( + "The function '_compute_view_world_poses' will be deprecated in favor of the util method" + " 'obtain_world_pose_from_view'. Please use 'obtain_world_pose_from_view' instead...." + ) + + return obtain_world_pose_from_view(self._view, env_ids, clone=True) + + def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: + """Computes the pose of the camera in the world frame. + + This function applies the offset pose to the pose of the view the camera is attached to. + + .. deprecated v2.3.1: + This function will be removed in a future release. Instead, use the code block below: + + .. code-block:: python + + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + pos_w, quat_w = math_utils.combine_frame_transforms( + pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] + ) + + Returns: + A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention. + """ + + # deprecation + logger.warning( + "The function '_compute_camera_world_poses' will be deprecated in favor of the combination of methods" + " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms'. Please use" + " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms' instead...." 
+ ) + + # get the pose of the view the camera is attached to + pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) + return math_utils.combine_frame_transforms(pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids]) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py new file mode 100644 index 00000000000..604c586adcc --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the ray-cast camera sensor.""" + +from dataclasses import MISSING +from typing import Literal + +from isaaclab.utils import configclass + +from .patterns import PinholeCameraPatternCfg +from .ray_caster_camera import RayCasterCamera +from .ray_caster_cfg import RayCasterCfg + + +@configclass +class RayCasterCameraCfg(RayCasterCfg): + """Configuration for the ray-cast sensor.""" + + @configclass + class OffsetCfg: + """The offset pose of the sensor's frame from the sensor's parent frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + convention: Literal["opengl", "ros", "world"] = "ros" + """The convention in which the frame offset is applied. Defaults to "ros". + + - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) + convention. + - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. + - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. + + """ + + class_type: type = RayCasterCamera + + offset: OffsetCfg = OffsetCfg() + """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" + + data_types: list[str] = ["distance_to_image_plane"] + """List of sensor names/types to enable for the camera. Defaults to ["distance_to_image_plane"].""" + + depth_clipping_behavior: Literal["max", "zero", "none"] = "none" + """Clipping behavior for the camera for values exceed the maximum value. Defaults to "none". + + - ``"max"``: Values are clipped to the maximum value. + - ``"zero"``: Values are clipped to zero. + - ``"none``: No clipping is applied. Values will be returned as ``inf`` for ``distance_to_camera`` and ``nan`` + for ``distance_to_image_plane`` data type. + """ + + pattern_cfg: PinholeCameraPatternCfg = MISSING + """The pattern that defines the local ray starting positions and directions in a pinhole camera pattern.""" + + def __post_init__(self): + # for cameras, this quantity should be False always. 
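+        # note: "base" alignment keeps the rays rigidly attached to the camera frame, i.e. the full
+        # orientation of the sensor is tracked when projecting the rays.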
+ self.ray_alignment = "base" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py new file mode 100644 index 00000000000..dbdebfad3a5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py @@ -0,0 +1,98 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the ray-cast sensor.""" + +from dataclasses import MISSING +from typing import Literal + +from isaaclab.markers import VisualizationMarkersCfg +from isaaclab.markers.config import RAY_CASTER_MARKER_CFG +from isaaclab.utils import configclass + +from ..sensor_base_cfg import SensorBaseCfg +from .patterns.patterns_cfg import PatternBaseCfg +from .ray_caster import RayCaster + + +@configclass +class RayCasterCfg(SensorBaseCfg): + """Configuration for the ray-cast sensor.""" + + @configclass + class OffsetCfg: + """The offset pose of the sensor's frame from the sensor's parent frame.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" + + class_type: type = RayCaster + + mesh_prim_paths: list[str] = MISSING + """The list of mesh primitive paths to ray cast against. + + Note: + Currently, only a single static mesh is supported. We are working on supporting multiple + static meshes and dynamic meshes. + """ + + offset: OffsetCfg = OffsetCfg() + """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" + + attach_yaw_only: bool | None = None + """Whether the rays' starting positions and directions only track the yaw orientation. + Defaults to None, which doesn't raise a warning of deprecated usage. + + This is useful for ray-casting height maps, where only yaw rotation is needed. + + .. deprecated:: 2.1.1 + + This attribute is deprecated and will be removed in the future. Please use + :attr:`ray_alignment` instead. + + To get the same behavior as setting this parameter to ``True`` or ``False``, set + :attr:`ray_alignment` to ``"yaw"`` or "base" respectively. + + """ + + ray_alignment: Literal["base", "yaw", "world"] = "base" + """Specify in what frame the rays are projected onto the ground. Default is "base". + + The options are: + + * ``base`` if the rays' starting positions and directions track the full root position and orientation. + * ``yaw`` if the rays' starting positions and directions track root position and only yaw component of + the orientation. This is useful for ray-casting height maps. + * ``world`` if rays' starting positions and directions are always fixed. This is useful in combination + with a mapping package on the robot and querying ray-casts in a global frame. + """ + + pattern_cfg: PatternBaseCfg = MISSING + """The pattern that defines the local ray starting positions and directions.""" + + max_distance: float = 1e6 + """Maximum distance (in meters) from the sensor to ray cast to. Defaults to 1e6.""" + + drift_range: tuple[float, float] = (0.0, 0.0) + """The range of drift (in meters) to add to the ray starting positions (xyz) in world frame. Defaults to (0.0, 0.0). 
+ + For floating base robots, this is useful for simulating drift in the robot's pose estimation. + """ + + ray_cast_drift_range: dict[str, tuple[float, float]] = {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)} + """The range of drift (in meters) to add to the projected ray points in local projection frame. Defaults to + a dictionary with zero drift for each x, y and z axis. + + For floating base robots, this is useful for simulating drift in the robot's pose estimation. + """ + + visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") + """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. + + Note: + This attribute is only used when debug visualization is enabled. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py new file mode 100644 index 00000000000..d63e085e752 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import dataclass + +import torch + + +@dataclass +class RayCasterData: + """Data container for the ray-cast sensor.""" + + pos_w: torch.Tensor = None + """Position of the sensor origin in world frame. + + Shape is (N, 3), where N is the number of sensors. + """ + quat_w: torch.Tensor = None + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. + + Shape is (N, 4), where N is the number of sensors. + """ + ray_hits_w: torch.Tensor = None + """The ray hit positions in the world frame. + + Shape is (N, B, 3), where N is the number of sensors, B is the number of rays + in the scan pattern per sensor. + """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py new file mode 100644 index 00000000000..4ece160bbe5 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py @@ -0,0 +1,363 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for sensors. + +This class defines an interface for sensors similar to how the :class:`isaaclab.assets.AssetBase` class works. +Each sensor class should inherit from this class and implement the abstract methods. +""" + +from __future__ import annotations + +import builtins +import inspect +import re +import weakref +from abc import ABC, abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING, Any + +import torch + +import omni.kit.app +import omni.timeline +from isaacsim.core.simulation_manager import IsaacEvents, SimulationManager + +import isaaclab.sim as sim_utils +from isaaclab.sim.utils.stage import get_current_stage + +if TYPE_CHECKING: + from .sensor_base_cfg import SensorBaseCfg + + +class SensorBase(ABC): + """The base class for implementing a sensor. + + The implementation is based on lazy evaluation. The sensor data is only updated when the user + tries accessing the data through the :attr:`data` property or sets ``force_compute=True`` in + the :meth:`update` method. 
This is done to avoid unnecessary computation when the sensor data + is not used. + + The sensor is updated at the specified update period. If the update period is zero, then the + sensor is updated at every simulation step. + """ + + def __init__(self, cfg: SensorBaseCfg): + """Initialize the sensor class. + + Args: + cfg: The configuration parameters for the sensor. + """ + # check that config is valid + if cfg.history_length < 0: + raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}") + # check that the config is valid + cfg.validate() + # store inputs + self.cfg = cfg.copy() + # flag for whether the sensor is initialized + self._is_initialized = False + # flag for whether the sensor is in visualization mode + self._is_visualizing = False + # get stage handle + self.stage = get_current_stage() + + # register various callback functions + self._register_callbacks() + + # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) + self._debug_vis_handle = None + # set initial state of debug visualization + self.set_debug_vis(self.cfg.debug_vis) + + def __del__(self): + """Unsubscribe from the callbacks.""" + # clear physics events handles + self._clear_callbacks() + + """ + Properties + """ + + @property + def is_initialized(self) -> bool: + """Whether the sensor is initialized. + + Returns True if the sensor is initialized, False otherwise. + """ + return self._is_initialized + + @property + def num_instances(self) -> int: + """Number of instances of the sensor. + + This is equal to the number of sensors per environment multiplied by the number of environments. + """ + return self._num_envs + + @property + def device(self) -> str: + """Memory device for computation.""" + return self._device + + @property + @abstractmethod + def data(self) -> Any: + """Data from the sensor. + + This property is only updated when the user tries to access the data. This is done to avoid + unnecessary computation when the sensor data is not used. + + For updating the sensor when this property is accessed, you can use the following + code snippet in your sensor implementation: + + .. code-block:: python + + # update sensors if needed + self._update_outdated_buffers() + # return the data (where `_data` is the data for the sensor) + return self._data + """ + raise NotImplementedError + + @property + def has_debug_vis_implementation(self) -> bool: + """Whether the sensor has a debug visualization implemented.""" + # check if function raises NotImplementedError + source_code = inspect.getsource(self._set_debug_vis_impl) + return "NotImplementedError" not in source_code + + """ + Operations + """ + + def set_debug_vis(self, debug_vis: bool) -> bool: + """Sets whether to visualize the sensor data. + + Args: + debug_vis: Whether to visualize the sensor data. + + Returns: + Whether the debug visualization was successfully set. False if the sensor + does not support debug visualization. 
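+
+        Example:
+            A minimal sketch, assuming ``sensor`` is an already-created sensor instance:
+
+            .. code-block:: python
+
+                # toggle the debug markers for the sensor; returns False if unsupported
+                enabled = sensor.set_debug_vis(True)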
+ """ + # check if debug visualization is supported + if not self.has_debug_vis_implementation: + return False + # toggle debug visualization objects + self._set_debug_vis_impl(debug_vis) + # toggle debug visualization flag + self._is_visualizing = debug_vis + # toggle debug visualization handles + if debug_vis: + # create a subscriber for the post update event if it doesn't exist + if self._debug_vis_handle is None: + app_interface = omni.kit.app.get_app_interface() + self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( + lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) + ) + else: + # remove the subscriber if it exists + if self._debug_vis_handle is not None: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + # return success + return True + + def reset(self, env_ids: Sequence[int] | None = None): + """Resets the sensor internals. + + Args: + env_ids: The sensor ids to reset. Defaults to None. + """ + # Resolve sensor ids + if env_ids is None: + env_ids = slice(None) + # Reset the timestamp for the sensors + self._timestamp[env_ids] = 0.0 + self._timestamp_last_update[env_ids] = 0.0 + # Set all reset sensors to outdated so that they are updated when data is called the next time. + self._is_outdated[env_ids] = True + + def update(self, dt: float, force_recompute: bool = False): + # Update the timestamp for the sensors + self._timestamp += dt + self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period + # Update the buffers + # TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!? + # It is only for the contact sensor but there we should redefine the update function IMO. + if force_recompute or self._is_visualizing or (self.cfg.history_length > 0): + self._update_outdated_buffers() + + """ + Implementation specific. + """ + + @abstractmethod + def _initialize_impl(self): + """Initializes the sensor-related handles and internal buffers.""" + # Obtain Simulation Context + sim = sim_utils.SimulationContext.instance() + if sim is None: + raise RuntimeError("Simulation Context is not initialized!") + # Obtain device and backend + self._device = sim.device + self._backend = sim.backend + self._sim_physics_dt = sim.get_physics_dt() + # Count number of environments + env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0] + self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr) + self._num_envs = len(self._parent_prims) + # Boolean tensor indicating whether the sensor data has to be refreshed + self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device) + # Current timestamp (in seconds) + self._timestamp = torch.zeros(self._num_envs, device=self._device) + # Timestamp from last update + self._timestamp_last_update = torch.zeros_like(self._timestamp) + + # Initialize debug visualization handle + if self._debug_vis_handle is None: + # set initial state of debug visualization + self.set_debug_vis(self.cfg.debug_vis) + + @abstractmethod + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the sensor data for provided environment ids. + + This function does not perform any time-based checks and directly fills the data into the + data container. + + Args: + env_ids: The indices of the sensors that are ready to capture. + """ + raise NotImplementedError + + def _set_debug_vis_impl(self, debug_vis: bool): + """Set debug visualization into visualization objects. 
+ + This function is responsible for creating the visualization objects if they don't exist + and input ``debug_vis`` is True. If the visualization objects exist, the function should + set their visibility into the stage. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + def _debug_vis_callback(self, event): + """Callback for debug visualization. + + This function calls the visualization objects and sets the data to visualize into them. + """ + raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") + + """ + Internal simulation callbacks. + """ + + def _register_callbacks(self): + """Registers the timeline and prim deletion callbacks.""" + + # register simulator callbacks (with weakref safety to avoid crashes on deletion) + def safe_callback(callback_name, event, obj_ref): + """Safely invoke a callback on a weakly-referenced object, ignoring ReferenceError if deleted.""" + try: + obj = obj_ref + getattr(obj, callback_name)(event) + except ReferenceError: + # Object has been deleted; ignore. + pass + + # note: use weakref on callbacks to ensure that this object can be deleted when its destructor is called. + # add callbacks for stage play/stop + obj_ref = weakref.proxy(self) + timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() + + # the order is set to 10 which is arbitrary but should be lower priority than the default order of 0 + # register timeline PLAY event callback (lower priority with order=10) + self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.PLAY), + lambda event, obj_ref=obj_ref: safe_callback("_initialize_callback", event, obj_ref), + order=10, + ) + # register timeline STOP event callback (lower priority with order=10) + self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( + int(omni.timeline.TimelineEventType.STOP), + lambda event, obj_ref=obj_ref: safe_callback("_invalidate_initialize_callback", event, obj_ref), + order=10, + ) + # register prim deletion callback + self._prim_deletion_callback_id = SimulationManager.register_callback( + lambda event, obj_ref=obj_ref: safe_callback("_on_prim_deletion", event, obj_ref), + event=IsaacEvents.PRIM_DELETION, + ) + + def _initialize_callback(self, event): + """Initializes the scene elements. + + Note: + PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be + called whenever the simulator "plays" from a "stop" state. + """ + if not self._is_initialized: + try: + self._initialize_impl() + except Exception as e: + if builtins.ISAACLAB_CALLBACK_EXCEPTION is None: + builtins.ISAACLAB_CALLBACK_EXCEPTION = e + self._is_initialized = True + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + self._is_initialized = False + if self._debug_vis_handle is not None: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + + def _on_prim_deletion(self, prim_path: str) -> None: + """Invalidates and deletes the callbacks when the prim is deleted. + + Args: + prim_path: The path to the prim that is being deleted. + + Note: + This function is called when the prim is deleted. 
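+
+ For example (illustrative values, not taken from an actual configuration), if ``self.cfg.prim_path``
+ is ``"/World/envs/env_.*/Robot/sensor"`` and the prim ``"/World/envs/env_0"`` is deleted, the
+ pattern below matches and the sensor clears its callbacks:
+
+ .. code-block:: python
+
+ # keep only the first (prim_path.count("/") + 1) tokens of the sensor's prim path expression
+ pattern = "^" + "/".join("/World/envs/env_.*/Robot/sensor".split("/")[:4]) + "$"
+ # pattern == "^/World/envs/env_.*$", which matches the deleted prim "/World/envs/env_0"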
+ """ + if prim_path == "/": + self._clear_callbacks() + return + result = re.match( + pattern="^" + "/".join(self.cfg.prim_path.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path + ) + if result: + self._clear_callbacks() + + def _clear_callbacks(self) -> None: + """Clears the callbacks.""" + if self._prim_deletion_callback_id: + SimulationManager.deregister_callback(self._prim_deletion_callback_id) + self._prim_deletion_callback_id = None + if self._initialize_handle: + self._initialize_handle.unsubscribe() + self._initialize_handle = None + if self._invalidate_initialize_handle: + self._invalidate_initialize_handle.unsubscribe() + self._invalidate_initialize_handle = None + # clear debug visualization + if self._debug_vis_handle: + self._debug_vis_handle.unsubscribe() + self._debug_vis_handle = None + + """ + Helper functions. + """ + + def _update_outdated_buffers(self): + """Fills the sensor data for the outdated sensors.""" + outdated_env_ids = self._is_outdated.nonzero().squeeze(-1) + if len(outdated_env_ids) > 0: + # obtain new data + self._update_buffers_impl(outdated_env_ids) + # update the timestamp from last update + self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids] + # set outdated flag to false for the updated sensors + self._is_outdated[outdated_env_ids] = False diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py new file mode 100644 index 00000000000..85875b2e499 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py @@ -0,0 +1,42 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.utils import configclass + +from .sensor_base import SensorBase + + +@configclass +class SensorBaseCfg: + """Configuration parameters for a sensor.""" + + class_type: type[SensorBase] = MISSING + """The associated sensor class. + + The class should inherit from :class:`isaaclab.sensors.sensor_base.SensorBase`. + """ + + prim_path: str = MISSING + """Prim path (or expression) to the sensor. + + .. note:: + The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which + will be replaced with the environment namespace. + + Example: ``{ENV_REGEX_NS}/Robot/sensor`` will be replaced with ``/World/envs/env_.*/Robot/sensor``. + + """ + + update_period: float = 0.0 + """Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step).""" + + history_length: int = 0 + """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only + the current data is stored (no history).""" + + debug_vis: bool = False + """Whether to visualize the sensor. Defaults to False.""" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py new file mode 100644 index 00000000000..869b233d166 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""TacSL Tactile Sensor implementation for IsaacLab.""" + +from .visuotactile_sensor import VisuoTactileSensor +from .visuotactile_sensor_cfg import GelSightRenderCfg, VisuoTactileSensorCfg +from .visuotactile_sensor_data import VisuoTactileSensorData diff --git a/source/isaaclab_physx/pyproject.toml b/source/isaaclab_physx/pyproject.toml new file mode 100644 index 00000000000..d90ac3536f1 --- /dev/null +++ b/source/isaaclab_physx/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools", "wheel", "toml"] +build-backend = "setuptools.build_meta" diff --git a/source/isaaclab_physx/setup.py b/source/isaaclab_physx/setup.py new file mode 100644 index 00000000000..ac5bdc64c45 --- /dev/null +++ b/source/isaaclab_physx/setup.py @@ -0,0 +1,61 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Installation script for the 'isaaclab_physx' python package.""" + +import os + +import toml +from setuptools import setup + +# Obtain the extension data from the extension.toml file +EXTENSION_PATH = os.path.dirname(os.path.realpath(__file__)) +# Read the extension.toml file +EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +# Minimum dependencies required prior to installation +INSTALL_REQUIRES = [ + # generic + "numpy<2", + "torch>=2.7", + "prettytable==3.3.0", + "toml", + "warp-lang", + # testing + "pytest", + "pytest-mock", + "junitparser", + "flatdict==4.0.1", + "flaky", + "packaging", +] + +PYTORCH_INDEX_URL = ["https://download.pytorch.org/whl/cu128"] + +# Installation operation +setup( + name="isaaclab_physx", + author="Isaac Lab Project Developers", + maintainer="Isaac Lab Project Developers", + url=EXTENSION_TOML_DATA["package"]["repository"], + version=EXTENSION_TOML_DATA["package"]["version"], + description=EXTENSION_TOML_DATA["package"]["description"], + keywords=EXTENSION_TOML_DATA["package"]["keywords"], + license="BSD-3-Clause", + include_package_data=True, + python_requires=">=3.10", + install_requires=INSTALL_REQUIRES, + dependency_links=PYTORCH_INDEX_URL, + packages=["isaaclab_physx"], + classifiers=[ + "Natural Language :: English", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Isaac Sim :: 4.5.0", + "Isaac Sim :: 5.0.0", + "Isaac Sim :: 5.1.0", + ], + zip_safe=False, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py index 9b9441a2234..cac90ed641e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py @@ -3,7 +3,7 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import DeformableObjectCfg +from isaaclab_physx.assets import DeformableObjectCfg from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py index 491b713c14f..305dc5125fe 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py @@ -11,7 +11,8 @@ from dataclasses import MISSING import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg, AssetBaseCfg, DeformableObjectCfg, RigidObjectCfg +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab_physx.assets import DeformableObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py index 272661bda61..485be97d3f8 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py @@ -6,7 +6,8 @@ from dataclasses import MISSING import isaaclab.sim as sim_utils -from isaaclab.assets import ArticulationCfg, AssetBaseCfg, DeformableObjectCfg, RigidObjectCfg +from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg +from isaaclab_physx.assets import DeformableObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py index 4506a95eaba..ddff39a4f33 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py @@ -4,7 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg, SurfaceGripperCfg +from isaaclab.assets import RigidObjectCfg +from isaaclab_physx.assets import SurfaceGripperCfg from isaaclab.devices import DevicesCfg from isaaclab.devices.device_base import DeviceBase from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py index c3d73a3fb3c..c0b04999388 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py @@ -3,7 +3,8 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg, SurfaceGripperCfg +from isaaclab.assets import RigidObjectCfg +from isaaclab_physx.assets import SurfaceGripperCfg from isaaclab.envs.mdp.actions.actions_cfg import SurfaceGripperBinaryActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import SceneEntityCfg From 
86169ed07bb308d8da108e8910abb1723373edaf Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 27 Jan 2026 16:25:48 +0100 Subject: [PATCH 02/38] removing sensors --- .../isaaclab_physx/sensors/__init__.py | 47 -- .../isaaclab_physx/sensors/camera/__init__.py | 13 - .../isaaclab_physx/sensors/camera/camera.py | 722 ------------------ .../sensors/camera/camera_cfg.py | 143 ---- .../sensors/camera/camera_data.py | 92 --- .../sensors/camera/tiled_camera.py | 425 ----------- .../sensors/camera/tiled_camera_cfg.py | 16 - .../isaaclab_physx/sensors/camera/utils.py | 272 ------- .../sensors/contact_sensor/__init__.py | 10 - .../sensors/contact_sensor/contact_sensor.py | 539 ------------- .../contact_sensor/contact_sensor_cfg.py | 79 -- .../contact_sensor/contact_sensor_data.py | 153 ---- .../sensors/frame_transformer/__init__.py | 10 - .../frame_transformer/frame_transformer.py | 560 -------------- .../frame_transformer_cfg.py | 76 -- .../frame_transformer_data.py | 57 -- .../isaaclab_physx/sensors/imu/__init__.py | 12 - .../isaaclab_physx/sensors/imu/imu.py | 299 -------- .../isaaclab_physx/sensors/imu/imu_cfg.py | 46 -- .../isaaclab_physx/sensors/imu/imu_data.py | 57 -- .../sensors/ray_caster/__init__.py | 29 - .../ray_caster/multi_mesh_ray_caster.py | 427 ----------- .../multi_mesh_ray_caster_camera.py | 221 ------ .../multi_mesh_ray_caster_camera_cfg.py | 35 - .../multi_mesh_ray_caster_camera_data.py | 23 - .../ray_caster/multi_mesh_ray_caster_cfg.py | 71 -- .../ray_caster/multi_mesh_ray_caster_data.py | 22 - .../sensors/ray_caster/patterns/__init__.py | 9 - .../sensors/ray_caster/patterns/patterns.py | 180 ----- .../ray_caster/patterns/patterns_cfg.py | 219 ------ .../sensors/ray_caster/ray_cast_utils.py | 51 -- .../sensors/ray_caster/ray_caster.py | 428 ----------- .../sensors/ray_caster/ray_caster_camera.py | 456 ----------- .../ray_caster/ray_caster_camera_cfg.py | 64 -- .../sensors/ray_caster/ray_caster_cfg.py | 98 --- .../sensors/ray_caster/ray_caster_data.py | 30 - .../isaaclab_physx/sensors/sensor_base.py | 363 --------- .../isaaclab_physx/sensors/sensor_base_cfg.py | 42 - .../sensors/tacsl_sensor/__init__.py | 10 - 39 files changed, 6406 deletions(-) delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py delete mode 100644 
source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py deleted file mode 100644 index 1e9273803a0..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-package containing various sensor classes implementations. - -This subpackage contains the sensor classes that are compatible with Isaac Sim. We include both -USD-based and custom sensors: - -* **USD-prim sensors**: Available in Omniverse and require creating a USD prim for them. - For instance, RTX ray tracing camera and lidar sensors. -* **USD-schema sensors**: Available in Omniverse and require creating a USD schema on an existing prim. - For instance, contact sensors and frame transformers. -* **Custom sensors**: Implemented in Python and do not require creating any USD prim or schema. - For instance, warp-based ray-casters. 
- -Due to the above categorization, the prim paths passed to the sensor's configuration class -are interpreted differently based on the sensor type. The following table summarizes the -interpretation of the prim paths for different sensor types: - -+---------------------+---------------------------+---------------------------------------------------------------+ -| Sensor Type | Example Prim Path | Pre-check | -+=====================+===========================+===============================================================+ -| Camera | /World/robot/base/camera | Leaf is available, and it will spawn a USD camera | -+---------------------+---------------------------+---------------------------------------------------------------+ -| Contact Sensor | /World/robot/feet_* | Leaf is available and checks if the schema exists | -+---------------------+---------------------------+---------------------------------------------------------------+ -| Ray Caster | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | -+---------------------+---------------------------+---------------------------------------------------------------+ -| Frame Transformer | /World/robot/base | Leaf exists and is a physics body (Articulation / Rigid Body) | -+---------------------+---------------------------+---------------------------------------------------------------+ -| Imu | /World/robot/base | Leaf exists and is a physics body (Rigid Body) | -+---------------------+---------------------------+---------------------------------------------------------------+ -| Visuo-Tactile Sensor| /World/robot/base | Leaf exists and is a physics body (Rigid Body) | -+---------------------+---------------------------+---------------------------------------------------------------+ - -""" - -from .camera import * # noqa: F401, F403 -from .contact_sensor import * # noqa: F401, F403 -from .frame_transformer import * # noqa: F401 -from .imu import * # noqa: F401, F403 -from .ray_caster import * # noqa: F401, F403 -from .sensor_base import SensorBase # noqa: F401 -from .sensor_base_cfg import SensorBaseCfg # noqa: F401 -from .tacsl_sensor import * # noqa: F401, F403 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py deleted file mode 100644 index f2318067b58..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-module for camera wrapper around USD camera prim.""" - -from .camera import Camera -from .camera_cfg import CameraCfg -from .camera_data import CameraData -from .tiled_camera import TiledCamera -from .tiled_camera_cfg import TiledCameraCfg -from .utils import * # noqa: F401, F403 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py deleted file mode 100644 index b4fa558d713..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera.py +++ /dev/null @@ -1,722 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import json -import logging -import re -from collections.abc import Sequence -from typing import TYPE_CHECKING, Any, Literal - -import numpy as np -import torch -from packaging import version - -import carb -import omni.usd -from pxr import Sdf, UsdGeom - -import isaaclab.sim as sim_utils -import isaaclab.utils.sensors as sensor_utils -from isaaclab.sim.views import XformPrimView -from isaaclab.utils import to_camel_case -from isaaclab.utils.array import convert_to_torch -from isaaclab.utils.math import ( - convert_camera_frame_orientation_convention, - create_rotation_matrix_from_view, - quat_from_matrix, -) -from isaaclab.utils.version import get_isaac_sim_version - -from ..sensor_base import SensorBase -from .camera_data import CameraData - -if TYPE_CHECKING: - from .camera_cfg import CameraCfg - -# import logger -logger = logging.getLogger(__name__) - - -class Camera(SensorBase): - r"""The camera sensor for acquiring visual data. - - This class wraps over the `UsdGeom Camera`_ for providing a consistent API for acquiring visual data. - It ensures that the camera follows the ROS convention for the coordinate system. - - Summarizing from the `replicator extension`_, the following sensor types are supported: - - - ``"rgb"``: A 3-channel rendered color image. - - ``"rgba"``: A 4-channel rendered color image with alpha channel. - - ``"distance_to_camera"``: An image containing the distance to camera optical center. - - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - - ``"depth"``: The same as ``"distance_to_image_plane"``. - - ``"normals"``: An image containing the local surface normal vectors at each pixel. - - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - - ``"semantic_segmentation"``: The semantic segmentation data. - - ``"instance_segmentation_fast"``: The instance segmentation data. - - ``"instance_id_segmentation_fast"``: The instance id segmentation data. - - .. note:: - Currently the following sensor types are not supported in a "view" format: - - - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. - - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. - - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). - - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). - - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_3d"``: The 3D view space bounding box data. - - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. - - .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output - .. 
_USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html - - """ - - cfg: CameraCfg - """The configuration parameters.""" - - UNSUPPORTED_TYPES: set[str] = { - "instance_id_segmentation", - "instance_segmentation", - "bounding_box_2d_tight", - "bounding_box_2d_loose", - "bounding_box_3d", - "bounding_box_2d_tight_fast", - "bounding_box_2d_loose_fast", - "bounding_box_3d_fast", - } - """The set of sensor types that are not supported by the camera class.""" - - def __init__(self, cfg: CameraCfg): - """Initializes the camera sensor. - - Args: - cfg: The configuration parameters. - - Raises: - RuntimeError: If no camera prim is found at the given path. - ValueError: If the provided data types are not supported by the camera. - """ - # check if sensor path is valid - # note: currently we do not handle environment indices if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Sensor_[1,2]". - sensor_path = cfg.prim_path.split("/")[-1] - sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None - if sensor_path_is_regex: - raise RuntimeError( - f"Invalid prim path for the camera sensor: {self.cfg.prim_path}." - "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." - ) - # perform check on supported data types - self._check_supported_data_types(cfg) - # initialize base class - super().__init__(cfg) - - # toggle rendering of rtx sensors as True - # this flag is read by SimulationContext to determine if rtx sensors should be rendered - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/isaaclab/render/rtx_sensors", True) - - # spawn the asset - if self.cfg.spawn is not None: - # compute the rotation offset - rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0) - rot_offset = convert_camera_frame_orientation_convention( - rot, origin=self.cfg.offset.convention, target="opengl" - ) - rot_offset = rot_offset.squeeze(0).cpu().numpy() - # ensure vertical aperture is set, otherwise replace with default for squared pixels - if self.cfg.spawn.vertical_aperture is None: - self.cfg.spawn.vertical_aperture = self.cfg.spawn.horizontal_aperture * self.cfg.height / self.cfg.width - # spawn the asset - self.cfg.spawn.func( - self.cfg.prim_path, self.cfg.spawn, translation=self.cfg.offset.pos, orientation=rot_offset - ) - # check that spawn was successful - matching_prims = sim_utils.find_matching_prims(self.cfg.prim_path) - if len(matching_prims) == 0: - raise RuntimeError(f"Could not find prim with path {self.cfg.prim_path}.") - - # UsdGeom Camera prim for the sensor - self._sensor_prims: list[UsdGeom.Camera] = list() - # Create empty variables for storing output data - self._data = CameraData() - - # HACK: We need to disable instancing for semantic_segmentation and instance_segmentation_fast to work - # checks for Isaac Sim v4.5 as this issue exists there - if get_isaac_sim_version() == version.parse("4.5"): - if "semantic_segmentation" in self.cfg.data_types or "instance_segmentation_fast" in self.cfg.data_types: - logger.warning( - "Isaac Sim 4.5 introduced a bug in Camera and TiledCamera when outputting instance and semantic" - " segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets" - " will be disabled in the current workflow and may lead to longer load times and increased memory" - " usage." 
- ) - with Sdf.ChangeBlock(): - for prim in self.stage.Traverse(): - prim.SetInstanceable(False) - - def __del__(self): - """Unsubscribes from callbacks and detach from the replicator registry.""" - # unsubscribe callbacks - super().__del__() - # delete from replicator registry - for _, annotators in self._rep_registry.items(): - for annotator, render_product_path in zip(annotators, self._render_product_paths): - annotator.detach([render_product_path]) - annotator = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - # message for class - return ( - f"Camera @ '{self.cfg.prim_path}': \n" - f"\tdata types : {list(self.data.output.keys())} \n" - f"\tsemantic filter : {self.cfg.semantic_filter}\n" - f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" - f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" - f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" - f"\tupdate period (s): {self.cfg.update_period}\n" - f"\tshape : {self.image_shape}\n" - f"\tnumber of sensors : {self._view.count}" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self._view.count - - @property - def data(self) -> CameraData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def frame(self) -> torch.tensor: - """Frame number when the measurement took place.""" - return self._frame - - @property - def render_product_paths(self) -> list[str]: - """The path of the render products for the cameras. - - This can be used via replicator interfaces to attach to writes or external annotator registry. - """ - return self._render_product_paths - - @property - def image_shape(self) -> tuple[int, int]: - """A tuple containing (height, width) of the camera sensor.""" - return (self.cfg.height, self.cfg.width) - - """ - Configuration - """ - - def set_intrinsic_matrices( - self, matrices: torch.Tensor, focal_length: float | None = None, env_ids: Sequence[int] | None = None - ): - """Set parameters of the USD camera from its intrinsic matrix. - - The intrinsic matrix is used to set the following parameters to the USD camera: - - - ``focal_length``: The focal length of the camera. - - ``horizontal_aperture``: The horizontal aperture of the camera. - - ``vertical_aperture``: The vertical aperture of the camera. - - ``horizontal_aperture_offset``: The horizontal offset of the camera. - - ``vertical_aperture_offset``: The vertical offset of the camera. - - .. warning:: - - Due to limitations of Omniverse camera, we need to assume that the camera is a spherical lens, - i.e. has square pixels, and the optical center is centered at the camera eye. If this assumption - is not true in the input intrinsic matrix, then the camera will not set up correctly. - - Args: - matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). - focal_length: Perspective focal length (in cm) used to calculate pixel size. Defaults to None. If None, - focal_length will be calculated 1 / width. - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. 
- """ - # resolve env_ids - if env_ids is None: - env_ids = self._ALL_INDICES - # convert matrices to numpy tensors - if isinstance(matrices, torch.Tensor): - matrices = matrices.cpu().numpy() - else: - matrices = np.asarray(matrices, dtype=float) - # iterate over env_ids - for i, intrinsic_matrix in zip(env_ids, matrices): - height, width = self.image_shape - - params = sensor_utils.convert_camera_intrinsics_to_usd( - intrinsic_matrix=intrinsic_matrix.reshape(-1), height=height, width=width, focal_length=focal_length - ) - - # change data for corresponding camera index - sensor_prim = self._sensor_prims[i] - # set parameters for camera - for param_name, param_value in params.items(): - # convert to camel case (CC) - param_name = to_camel_case(param_name, to="CC") - # get attribute from the class - param_attr = getattr(sensor_prim, f"Get{param_name}Attr") - # set value - # note: We have to do it this way because the camera might be on a different - # layer (default cameras are on session layer), and this is the simplest - # way to set the property on the right layer. - omni.usd.set_prop_val(param_attr(), param_value) - # update the internal buffers - self._update_intrinsic_matrices(env_ids) - - """ - Operations - Set pose. - """ - - def set_world_poses( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - env_ids: Sequence[int] | None = None, - convention: Literal["opengl", "ros", "world"] = "ros", - ): - r"""Set the pose of the camera w.r.t. the world frame using specified convention. - - Since different fields use different conventions for camera orientations, the method allows users to - set the camera poses in the specified convention. Possible conventions are: - - - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention - - See :meth:`isaaclab.sensors.camera.utils.convert_camera_frame_orientation_convention` for more details - on the conventions. - - Args: - positions: The cartesian coordinates (in meters). Shape is (N, 3). - Defaults to None, in which case the camera position in not changed. - orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). - Defaults to None, in which case the camera orientation in not changed. - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. - convention: The convention in which the poses are fed. Defaults to "ros". - - Raises: - RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. 
- """ - # resolve env_ids - if env_ids is None: - env_ids = self._ALL_INDICES - # convert to backend tensor - if positions is not None: - if isinstance(positions, np.ndarray): - positions = torch.from_numpy(positions).to(device=self._device) - elif not isinstance(positions, torch.Tensor): - positions = torch.tensor(positions, device=self._device) - # convert rotation matrix from input convention to OpenGL - if orientations is not None: - if isinstance(orientations, np.ndarray): - orientations = torch.from_numpy(orientations).to(device=self._device) - elif not isinstance(orientations, torch.Tensor): - orientations = torch.tensor(orientations, device=self._device) - orientations = convert_camera_frame_orientation_convention(orientations, origin=convention, target="opengl") - # set the pose - self._view.set_world_poses(positions, orientations, env_ids) - - def set_world_poses_from_view( - self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None - ): - """Set the poses of the camera from the eye position and look-at target position. - - Args: - eyes: The positions of the camera's eye. Shape is (N, 3). - targets: The target locations to look at. Shape is (N, 3). - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. - - Raises: - RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. - NotImplementedError: If the stage up-axis is not "Y" or "Z". - """ - # resolve env_ids - if env_ids is None: - env_ids = self._ALL_INDICES - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # set camera poses using the view - orientations = quat_from_matrix(create_rotation_matrix_from_view(eyes, targets, up_axis, device=self._device)) - self._view.set_world_poses(eyes, orientations, env_ids) - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - if not self._is_initialized: - raise RuntimeError( - "Camera could not be initialized. Please ensure --enable_cameras is used to enable rendering." - ) - # reset the timestamps - super().reset(env_ids) - # resolve None - # note: cannot do smart indexing here since we do a for loop over data. - if env_ids is None: - env_ids = self._ALL_INDICES - # reset the data - # note: this recomputation is useful if one performs events such as randomizations on the camera poses. - self._update_poses(env_ids) - # Reset the frame count - self._frame[env_ids] = 0 - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - This function creates handles and registers the provided data types with the replicator registry to - be able to access the data from the sensor. It also initializes the internal buffers to store the data. - - Raises: - RuntimeError: If the number of camera prims in the view does not match the number of environments. - RuntimeError: If replicator was not found. - """ - carb_settings_iface = carb.settings.get_settings() - if not carb_settings_iface.get("/isaaclab/cameras_enabled"): - raise RuntimeError( - "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" - " rendering." 
- ) - - import omni.replicator.core as rep - from omni.syntheticdata.scripts.SyntheticData import SyntheticData - - # Initialize parent class - super()._initialize_impl() - # Create a view for the sensor with Fabric enabled for fast pose querie, otherwise position will be staling - self._view = XformPrimView( - self.cfg.prim_path, device=self._device, stage=self.stage, sync_usd_on_fabric_write=True - ) - # Check that sizes are correct - if self._view.count != self._num_envs: - raise RuntimeError( - f"Number of camera prims in the view ({self._view.count}) does not match" - f" the number of environments ({self._num_envs})." - ) - - # Create all env_ids buffer - self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer - self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - - # Attach the sensor data types to render node - self._render_product_paths: list[str] = list() - self._rep_registry: dict[str, list[rep.annotators.Annotator]] = {name: list() for name in self.cfg.data_types} - - # Convert all encapsulated prims to Camera - for cam_prim in self._view.prims: - # Obtain the prim path - cam_prim_path = cam_prim.GetPath().pathString - # Check if prim is a camera - if not cam_prim.IsA(UsdGeom.Camera): - raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") - # Add to list - sensor_prim = UsdGeom.Camera(cam_prim) - self._sensor_prims.append(sensor_prim) - - # Get render product - # From Isaac Sim 2023.1 onwards, render product is a HydraTexture so we need to extract the path - render_prod_path = rep.create.render_product(cam_prim_path, resolution=(self.cfg.width, self.cfg.height)) - if not isinstance(render_prod_path, str): - render_prod_path = render_prod_path.path - self._render_product_paths.append(render_prod_path) - - # Check if semantic types or semantic filter predicate is provided - if isinstance(self.cfg.semantic_filter, list): - semantic_filter_predicate = ":*; ".join(self.cfg.semantic_filter) + ":*" - elif isinstance(self.cfg.semantic_filter, str): - semantic_filter_predicate = self.cfg.semantic_filter - else: - raise ValueError(f"Semantic types must be a list or a string. Received: {self.cfg.semantic_filter}.") - # set the semantic filter predicate - # copied from rep.scripts.writes_default.basic_writer.py - SyntheticData.Get().set_instance_mapping_semantic_filter(semantic_filter_predicate) - - # Iterate over each data type and create annotator - # TODO: This will move out of the loop once Replicator supports multiple render products within a single - # annotator, i.e.: rep_annotator.attach(self._render_product_paths) - for name in self.cfg.data_types: - # note: we are verbose here to make it easier to understand the code. - # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. - # if colorize is false, the data is returned as a uint32 image with ids as values. 
- if name == "semantic_segmentation": - init_params = { - "colorize": self.cfg.colorize_semantic_segmentation, - "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), - } - elif name == "instance_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_segmentation} - elif name == "instance_id_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} - else: - init_params = None - - # Resolve device name - if "cuda" in self._device: - device_name = self._device.split(":")[0] - else: - device_name = "cpu" - - # Map special cases to their corresponding annotator names - special_cases = {"rgba": "rgb", "depth": "distance_to_image_plane"} - # Get the annotator name, falling back to the original name if not a special case - annotator_name = special_cases.get(name, name) - # Create the annotator node - rep_annotator = rep.AnnotatorRegistry.get_annotator(annotator_name, init_params, device=device_name) - - # attach annotator to render product - rep_annotator.attach(render_prod_path) - # add to registry - self._rep_registry[name].append(rep_annotator) - - # Create internal buffers - self._create_buffers() - self._update_intrinsic_matrices(self._ALL_INDICES) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - # Increment frame count - self._frame[env_ids] += 1 - # -- pose - if self.cfg.update_latest_camera_pose: - self._update_poses(env_ids) - # -- read the data from annotator registry - # check if buffer is called for the first time. If so then, allocate the memory - if len(self._data.output) == 0: - # this is the first time buffer is called - # it allocates memory for all the sensors - self._create_annotator_data() - else: - # iterate over all the data types - for name, annotators in self._rep_registry.items(): - # iterate over all the annotators - for index in env_ids: - # get the output - output = annotators[index].get_data() - # process the output - data, info = self._process_annotator_output(name, output) - # add data to output - self._data.output[name][index] = data - # add info to output - self._data.info[index][name] = info - # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, - # the replicator depth clipping is applied w.r.t. to the image plane which may result in values - # larger than the clipping range in the output. We apply an additional clipping to ensure values - # are within the clipping range for all the annotators. 
- if name == "distance_to_camera": - self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf - # apply defined clipping behavior - if ( - name == "distance_to_camera" or name == "distance_to_image_plane" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[name][torch.isinf(self._data.output[name])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) - - """ - Private Helpers - """ - - def _check_supported_data_types(self, cfg: CameraCfg): - """Checks if the data types are supported by the ray-caster camera.""" - # check if there is any intersection in unsupported types - # reason: these use np structured data types which we can't yet convert to torch tensor - common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES - if common_elements: - # provide alternative fast counterparts - fast_common_elements = [] - for item in common_elements: - if "instance_segmentation" in item or "instance_id_segmentation" in item: - fast_common_elements.append(item + "_fast") - # raise error - raise ValueError( - f"Camera class does not support the following sensor types: {common_elements}." - "\n\tThis is because these sensor types output numpy structured data types which" - "can't be converted to torch tensors easily." - "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." - f"\n\t\tFast counterparts: {fast_common_elements}" - ) - - def _create_buffers(self): - """Create buffers for storing data.""" - # create the data object - # -- pose of the cameras - self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) - self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) - # -- intrinsic matrix - self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) - self._data.image_shape = self.image_shape - # -- output data - # lazy allocation of data dictionary - # since the size of the output data is not known in advance, we leave it as None - # the memory will be allocated when the buffer() function is called for the first time. - self._data.output = {} - self._data.info = [{name: None for name in self.cfg.data_types} for _ in range(self._view.count)] - - def _update_intrinsic_matrices(self, env_ids: Sequence[int]): - """Compute camera's matrix of intrinsic parameters. - - Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. - - Note: - The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. - The coordinates of points on the image plane are in the homogeneous representation. 
- """ - # iterate over all cameras - for i in env_ids: - # Get corresponding sensor prim - sensor_prim = self._sensor_prims[i] - # get camera parameters - # currently rendering does not use aperture offsets or vertical aperture - focal_length = sensor_prim.GetFocalLengthAttr().Get() - horiz_aperture = sensor_prim.GetHorizontalApertureAttr().Get() - - # get viewport parameters - height, width = self.image_shape - # extract intrinsic parameters - f_x = (width * focal_length) / horiz_aperture - f_y = f_x - c_x = width * 0.5 - c_y = height * 0.5 - # create intrinsic matrix for depth linear - self._data.intrinsic_matrices[i, 0, 0] = f_x - self._data.intrinsic_matrices[i, 0, 2] = c_x - self._data.intrinsic_matrices[i, 1, 1] = f_y - self._data.intrinsic_matrices[i, 1, 2] = c_y - self._data.intrinsic_matrices[i, 2, 2] = 1 - - def _update_poses(self, env_ids: Sequence[int]): - """Computes the pose of the camera in the world frame with ROS convention. - - This methods uses the ROS convention to resolve the input pose. In this convention, - we assume that the camera front-axis is +Z-axis and up-axis is -Y-axis. - - Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z). - """ - # check camera prim exists - if len(self._sensor_prims) == 0: - raise RuntimeError("Camera prim is None. Please call 'sim.play()' first.") - - # get the poses from the view - poses, quat = self._view.get_world_poses(env_ids) - self._data.pos_w[env_ids] = poses - self._data.quat_w_world[env_ids] = convert_camera_frame_orientation_convention( - quat, origin="opengl", target="world" - ) - - def _create_annotator_data(self): - """Create the buffers to store the annotator data. - - We create a buffer for each annotator and store the data in a dictionary. Since the data - shape is not known beforehand, we create a list of buffers and concatenate them later. - - This is an expensive operation and should be called only once. - """ - # add data from the annotators - for name, annotators in self._rep_registry.items(): - # create a list to store the data for each annotator - data_all_cameras = list() - # iterate over all the annotators - for index in self._ALL_INDICES: - # get the output - output = annotators[index].get_data() - # process the output - data, info = self._process_annotator_output(name, output) - # append the data - data_all_cameras.append(data) - # store the info - self._data.info[index][name] = info - # concatenate the data along the batch dimension - self._data.output[name] = torch.stack(data_all_cameras, dim=0) - # NOTE: `distance_to_camera` and `distance_to_image_plane` are not both clipped to the maximum defined - # in the clipping range. The clipping is applied only to `distance_to_image_plane` and then both - # outputs are only clipped where the values in `distance_to_image_plane` exceed the threshold. To - # have a unified behavior between all cameras, we clip both outputs to the maximum value defined. - if name == "distance_to_camera": - self._data.output[name][self._data.output[name] > self.cfg.spawn.clipping_range[1]] = torch.inf - # clip the data if needed - if ( - name == "distance_to_camera" or name == "distance_to_image_plane" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[name][torch.isinf(self._data.output[name])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) - - def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: - """Process the annotator output. 
- - This function is called after the data has been collected from all the cameras. - """ - # extract info and data from the output - if isinstance(output, dict): - data = output["data"] - info = output["info"] - else: - data = output - info = None - # convert data into torch tensor - data = convert_to_torch(data, device=self.device) - - # process data for different segmentation types - # Note: Replicator returns raw buffers of dtype int32 for segmentation types - # so we need to convert them to uint8 4 channel images for colorized types - height, width = self.image_shape - if name == "semantic_segmentation": - if self.cfg.colorize_semantic_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - elif name == "instance_segmentation_fast": - if self.cfg.colorize_instance_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - elif name == "instance_id_segmentation_fast": - if self.cfg.colorize_instance_id_segmentation: - data = data.view(torch.uint8).reshape(height, width, -1) - else: - data = data.view(height, width, 1) - # make sure buffer dimensions are consistent as (H, W, C) - elif name == "distance_to_camera" or name == "distance_to_image_plane" or name == "depth": - data = data.view(height, width, 1) - # we only return the RGB channels from the RGBA output if rgb is required - # normals return (x, y, z) in first 3 channels, 4th channel is unused - elif name == "rgb" or name == "normals": - data = data[..., :3] - # motion vectors return (x, y) in first 2 channels, 3rd and 4th channels are unused - elif name == "motion_vectors": - data = data[..., :2] - - # return the data and info - return data, info - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py deleted file mode 100644 index 8fd9f307d18..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_cfg.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from dataclasses import MISSING -from typing import Literal - -from isaaclab.sim import FisheyeCameraCfg, PinholeCameraCfg -from isaaclab.utils import configclass - -from ..sensor_base_cfg import SensorBaseCfg -from .camera import Camera - - -@configclass -class CameraCfg(SensorBaseCfg): - """Configuration for a camera sensor.""" - - @configclass - class OffsetCfg: - """The offset pose of the sensor's frame from the sensor's parent frame.""" - - pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" - - convention: Literal["opengl", "ros", "world"] = "ros" - """The convention in which the frame offset is applied. Defaults to "ros". - - - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) - convention. 
- - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. - - """ - - class_type: type = Camera - - offset: OffsetCfg = OffsetCfg() - """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. - - Note: - The parent frame is the frame the sensor attaches to. For example, the parent frame of a - camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. - """ - - spawn: PinholeCameraCfg | FisheyeCameraCfg | None = MISSING - """Spawn configuration for the asset. - - If None, then the prim is not spawned by the asset. Instead, it is assumed that the - asset is already present in the scene. - """ - - depth_clipping_behavior: Literal["max", "zero", "none"] = "none" - """Clipping behavior for the camera for values exceed the maximum value. Defaults to "none". - - - ``"max"``: Values are clipped to the maximum value. - - ``"zero"``: Values are clipped to zero. - - ``"none``: No clipping is applied. Values will be returned as ``inf``. - """ - - data_types: list[str] = ["rgb"] - """List of sensor names/types to enable for the camera. Defaults to ["rgb"]. - - Please refer to the :class:`Camera` class for a list of available data types. - """ - - width: int = MISSING - """Width of the image in pixels.""" - - height: int = MISSING - """Height of the image in pixels.""" - - update_latest_camera_pose: bool = False - """Whether to update the latest camera pose when fetching the camera's data. Defaults to False. - - If True, the latest camera pose is updated in the camera's data which will slow down performance - due to the use of :class:`XformPrimView`. - If False, the pose of the camera during initialization is returned. - """ - - semantic_filter: str | list[str] = "*:*" - """A string or a list specifying a semantic filter predicate. Defaults to ``"*:*"``. - - If a string, it should be a disjunctive normal form of (semantic type, labels). For examples: - - * ``"typeA : labelA & !labelB | labelC , typeB: labelA ; typeC: labelE"``: - All prims with semantic type "typeA" and label "labelA" but not "labelB" or with label "labelC". - Also, all prims with semantic type "typeB" and label "labelA", or with semantic type "typeC" and label "labelE". - * ``"typeA : * ; * : labelA"``: All prims with semantic type "typeA" or with label "labelA" - - If a list of strings, each string should be a semantic type. The segmentation for prims with - semantics of the specified types will be retrieved. For example, if the list is ["class"], only - the segmentation for prims with semantics of type "class" will be retrieved. - - .. seealso:: - - For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_. - - .. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering - """ - - colorize_semantic_segmentation: bool = True - """Whether to colorize the semantic segmentation images. Defaults to True. - - If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors - and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array. - """ - - colorize_instance_id_segmentation: bool = True - """Whether to colorize the instance ID segmentation images. Defaults to True. 
-
-    If True, instance id segmentation is converted to an image where instance IDs are mapped to colors
-    and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.
-    """
-
-    colorize_instance_segmentation: bool = True
-    """Whether to colorize the instance segmentation images. Defaults to True.
-
-    If True, instance segmentation is converted to an image where instance IDs are mapped to colors
-    and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.
-    """
-
-    semantic_segmentation_mapping: dict = {}
-    """Dictionary mapping semantics to specific colours.
-
-    For example:
-
-    .. code-block:: python
-
-        {
-            "class:cube_1": (255, 36, 66, 255),
-            "class:cube_2": (255, 184, 48, 255),
-            "class:cube_3": (55, 255, 139, 255),
-            "class:table": (255, 237, 218, 255),
-            "class:ground": (100, 100, 100, 255),
-            "class:robot": (61, 178, 255, 255),
-        }
-
-    """
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py
deleted file mode 100644
index ec3288b04e9..00000000000
--- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/camera_data.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-
-from dataclasses import dataclass
-from typing import Any
-
-import torch
-
-from isaaclab.utils.math import convert_camera_frame_orientation_convention
-
-
-@dataclass
-class CameraData:
-    """Data container for the camera sensor."""
-
-    ##
-    # Frame state.
-    ##
-
-    pos_w: torch.Tensor = None
-    """Position of the sensor origin in world frame, following ROS convention.
-
-    Shape is (N, 3) where N is the number of sensors.
-    """
-
-    quat_w_world: torch.Tensor = None
-    """Quaternion orientation `(w, x, y, z)` of the sensor origin in world frame, following the world coordinate frame.
-
-    .. note::
-        World frame convention follows the camera aligned with forward axis +X and up axis +Z.
-
-    Shape is (N, 4) where N is the number of sensors.
-    """
-
-    ##
-    # Camera data
-    ##
-
-    image_shape: tuple[int, int] = None
-    """A tuple containing (height, width) of the camera sensor."""
-
-    intrinsic_matrices: torch.Tensor = None
-    """The intrinsic matrices for the camera.
-
-    Shape is (N, 3, 3) where N is the number of sensors.
-    """
-
-    output: dict[str, torch.Tensor] = None
-    """The retrieved sensor data with sensor types as key.
-
-    The format of the data is available in the `Replicator Documentation`_. For semantic-based data,
-    this corresponds to the ``"data"`` key in the output of the sensor.
-
-    .. _Replicator Documentation: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/annotators_details.html#annotator-output
-    """
-
-    info: list[dict[str, Any]] = None
-    """The retrieved sensor info with sensor types as key.
-
-    This contains extra information provided by the sensor such as semantic segmentation label mapping, prim paths.
-    For semantic-based data, this corresponds to the ``"info"`` key in the output of the sensor. For other sensor
-    types, the info is empty.
-    """
-
-    ##
-    # Additional Frame orientation conventions
-    ##
-
-    @property
-    def quat_w_ros(self) -> torch.Tensor:
-        """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following ROS convention.
-
-        ..
note:: - ROS convention follows the camera aligned with forward axis +Z and up axis -Y. - - Shape is (N, 4) where N is the number of sensors. - """ - return convert_camera_frame_orientation_convention(self.quat_w_world, origin="world", target="ros") - - @property - def quat_w_opengl(self) -> torch.Tensor: - """Quaternion orientation `(w, x, y, z)` of the sensor origin in the world frame, following - Opengl / USD Camera convention. - - .. note:: - OpenGL convention follows the camera aligned with forward axis -Z and up axis +Y. - - Shape is (N, 4) where N is the number of sensors. - """ - return convert_camera_frame_orientation_convention(self.quat_w_world, origin="world", target="opengl") diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py deleted file mode 100644 index 4b3676158e7..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera.py +++ /dev/null @@ -1,425 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import json -import math -from collections.abc import Sequence -from typing import TYPE_CHECKING, Any - -import numpy as np -import torch -import warp as wp - -import carb -from pxr import UsdGeom - -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.warp.kernels import reshape_tiled_image - -from ..sensor_base import SensorBase -from .camera import Camera - -if TYPE_CHECKING: - from .tiled_camera_cfg import TiledCameraCfg - - -class TiledCamera(Camera): - r"""The tiled rendering based camera sensor for acquiring the same data as the Camera class. - - This class inherits from the :class:`Camera` class but uses the tiled-rendering API to acquire - the visual data. Tiled-rendering concatenates the rendered images from multiple cameras into a single image. - This allows for rendering multiple cameras in parallel and is useful for rendering large scenes with multiple - cameras efficiently. - - The following sensor types are supported: - - - ``"rgb"``: A 3-channel rendered color image. - - ``"rgba"``: A 4-channel rendered color image with alpha channel. - - ``"distance_to_camera"``: An image containing the distance to camera optical center. - - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - - ``"depth"``: Alias for ``"distance_to_image_plane"``. - - ``"normals"``: An image containing the local surface normal vectors at each pixel. - - ``"motion_vectors"``: An image containing the motion vector data at each pixel. - - ``"semantic_segmentation"``: The semantic segmentation data. - - ``"instance_segmentation_fast"``: The instance segmentation data. - - ``"instance_id_segmentation_fast"``: The instance id segmentation data. - - .. note:: - Currently the following sensor types are not supported in a "view" format: - - - ``"instance_segmentation"``: The instance segmentation data. Please use the fast counterparts instead. - - ``"instance_id_segmentation"``: The instance id segmentation data. Please use the fast counterparts instead. - - ``"bounding_box_2d_tight"``: The tight 2D bounding box data (only contains non-occluded regions). - - ``"bounding_box_2d_tight_fast"``: The tight 2D bounding box data (only contains non-occluded regions). 
- - ``"bounding_box_2d_loose"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_2d_loose_fast"``: The loose 2D bounding box data (contains occluded regions). - - ``"bounding_box_3d"``: The 3D view space bounding box data. - - ``"bounding_box_3d_fast"``: The 3D view space bounding box data. - - .. _replicator extension: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#annotator-output - .. _USDGeom Camera: https://graphics.pixar.com/usd/docs/api/class_usd_geom_camera.html - - .. versionadded:: v1.0.0 - - This feature is available starting from Isaac Sim 4.2. Before this version, the tiled rendering APIs - were not available. - - """ - - cfg: TiledCameraCfg - """The configuration parameters.""" - - def __init__(self, cfg: TiledCameraCfg): - """Initializes the tiled camera sensor. - - Args: - cfg: The configuration parameters. - - Raises: - RuntimeError: If no camera prim is found at the given path. - ValueError: If the provided data types are not supported by the camera. - """ - super().__init__(cfg) - - def __del__(self): - """Unsubscribes from callbacks and detach from the replicator registry.""" - # unsubscribe from callbacks - SensorBase.__del__(self) - # detach from the replicator registry - for annotator in self._annotators.values(): - annotator.detach(self.render_product_paths) - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - # message for class - return ( - f"Tiled Camera @ '{self.cfg.prim_path}': \n" - f"\tdata types : {list(self.data.output.keys())} \n" - f"\tsemantic filter : {self.cfg.semantic_filter}\n" - f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" - f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" - f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" - f"\tupdate period (s): {self.cfg.update_period}\n" - f"\tshape : {self.image_shape}\n" - f"\tnumber of sensors : {self._view.count}" - ) - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - if not self._is_initialized: - raise RuntimeError( - "TiledCamera could not be initialized. Please ensure --enable_cameras is used to enable rendering." - ) - # reset the timestamps - SensorBase.reset(self, env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset the frame count - self._frame[env_ids] = 0 - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - This function creates handles and registers the provided data types with the replicator registry to - be able to access the data from the sensor. It also initializes the internal buffers to store the data. - - Raises: - RuntimeError: If the number of camera prims in the view does not match the number of environments. - RuntimeError: If replicator was not found. - """ - carb_settings_iface = carb.settings.get_settings() - if not carb_settings_iface.get("/isaaclab/cameras_enabled"): - raise RuntimeError( - "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" - " rendering." 
- ) - - import omni.replicator.core as rep - - # Initialize parent class - SensorBase._initialize_impl(self) - # Create a view for the sensor - self._view = XformPrimView(self.cfg.prim_path, device=self._device, stage=self.stage) - # Check that sizes are correct - if self._view.count != self._num_envs: - raise RuntimeError( - f"Number of camera prims in the view ({self._view.count}) does not match" - f" the number of environments ({self._num_envs})." - ) - - # Create all env_ids buffer - self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer - self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - - # Convert all encapsulated prims to Camera - cam_prim_paths = [] - for cam_prim in self._view.prims: - # Get camera prim - cam_prim_path = cam_prim.GetPath().pathString - # Check if prim is a camera - if not cam_prim.IsA(UsdGeom.Camera): - raise RuntimeError(f"Prim at path '{cam_prim_path}' is not a Camera.") - # Add to list - self._sensor_prims.append(UsdGeom.Camera(cam_prim)) - cam_prim_paths.append(cam_prim_path) - - # Create replicator tiled render product - rp = rep.create.render_product_tiled(cameras=cam_prim_paths, tile_resolution=(self.cfg.width, self.cfg.height)) - self._render_product_paths = [rp.path] - - # Define the annotators based on requested data types - self._annotators = dict() - for annotator_type in self.cfg.data_types: - if annotator_type == "rgba" or annotator_type == "rgb": - annotator = rep.AnnotatorRegistry.get_annotator("rgb", device=self.device, do_array_copy=False) - self._annotators["rgba"] = annotator - elif annotator_type == "depth" or annotator_type == "distance_to_image_plane": - # keep depth for backwards compatibility - annotator = rep.AnnotatorRegistry.get_annotator( - "distance_to_image_plane", device=self.device, do_array_copy=False - ) - self._annotators[annotator_type] = annotator - # note: we are verbose here to make it easier to understand the code. - # if colorize is true, the data is mapped to colors and a uint8 4 channel image is returned. - # if colorize is false, the data is returned as a uint32 image with ids as values. 
- else: - init_params = None - if annotator_type == "semantic_segmentation": - init_params = { - "colorize": self.cfg.colorize_semantic_segmentation, - "mapping": json.dumps(self.cfg.semantic_segmentation_mapping), - } - elif annotator_type == "instance_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_segmentation} - elif annotator_type == "instance_id_segmentation_fast": - init_params = {"colorize": self.cfg.colorize_instance_id_segmentation} - - annotator = rep.AnnotatorRegistry.get_annotator( - annotator_type, init_params, device=self.device, do_array_copy=False - ) - self._annotators[annotator_type] = annotator - - # Attach the annotator to the render product - for annotator in self._annotators.values(): - annotator.attach(self._render_product_paths) - - # Create internal buffers - self._create_buffers() - - def _update_buffers_impl(self, env_ids: Sequence[int]): - # Increment frame count - self._frame[env_ids] += 1 - - # update latest camera pose - if self.cfg.update_latest_camera_pose: - self._update_poses(env_ids) - - # Extract the flattened image buffer - for data_type, annotator in self._annotators.items(): - # check whether returned data is a dict (used for segmentation) - output = annotator.get_data() - if isinstance(output, dict): - tiled_data_buffer = output["data"] - self._data.info[data_type] = output["info"] - else: - tiled_data_buffer = output - - # convert data buffer to warp array - if isinstance(tiled_data_buffer, np.ndarray): - # Let warp infer the dtype from numpy array instead of hardcoding uint8 - # Different annotators return different dtypes: RGB(uint8), depth(float32), segmentation(uint32) - tiled_data_buffer = wp.array(tiled_data_buffer, device=self.device) - else: - tiled_data_buffer = tiled_data_buffer.to(device=self.device) - - # process data for different segmentation types - # Note: Replicator returns raw buffers of dtype uint32 for segmentation types - # so we need to convert them to uint8 4 channel images for colorized types - if ( - (data_type == "semantic_segmentation" and self.cfg.colorize_semantic_segmentation) - or (data_type == "instance_segmentation_fast" and self.cfg.colorize_instance_segmentation) - or (data_type == "instance_id_segmentation_fast" and self.cfg.colorize_instance_id_segmentation) - ): - tiled_data_buffer = wp.array( - ptr=tiled_data_buffer.ptr, shape=(*tiled_data_buffer.shape, 4), dtype=wp.uint8, device=self.device - ) - - # For motion vectors, we only require the first two channels of the tiled buffer - # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/2003) - if data_type == "motion_vectors": - tiled_data_buffer = tiled_data_buffer[:, :, :2].contiguous() - - # For normals, we only require the first three channels of the tiled buffer - # Note: Not doing this breaks the alignment of the data (check: https://github.com/isaac-sim/IsaacLab/issues/4239) - if data_type == "normals": - tiled_data_buffer = tiled_data_buffer[:, :, :3].contiguous() - - wp.launch( - kernel=reshape_tiled_image, - dim=(self._view.count, self.cfg.height, self.cfg.width), - inputs=[ - tiled_data_buffer.flatten(), - wp.from_torch(self._data.output[data_type]), # zero-copy alias - *list(self._data.output[data_type].shape[1:]), # height, width, num_channels - self._tiling_grid_shape()[0], # num_tiles_x - ], - device=self.device, - ) - - # alias rgb as first 3 channels of rgba - if data_type == "rgba" and "rgb" in self.cfg.data_types: - self._data.output["rgb"] = 
self._data.output["rgba"][..., :3] - - # NOTE: The `distance_to_camera` annotator returns the distance to the camera optical center. However, - # the replicator depth clipping is applied w.r.t. to the image plane which may result in values - # larger than the clipping range in the output. We apply an additional clipping to ensure values - # are within the clipping range for all the annotators. - if data_type == "distance_to_camera": - self._data.output[data_type][self._data.output[data_type] > self.cfg.spawn.clipping_range[1]] = ( - torch.inf - ) - # apply defined clipping behavior - if ( - data_type == "distance_to_camera" or data_type == "distance_to_image_plane" or data_type == "depth" - ) and self.cfg.depth_clipping_behavior != "none": - self._data.output[data_type][torch.isinf(self._data.output[data_type])] = ( - 0.0 if self.cfg.depth_clipping_behavior == "zero" else self.cfg.spawn.clipping_range[1] - ) - - """ - Private Helpers - """ - - def _check_supported_data_types(self, cfg: TiledCameraCfg): - """Checks if the data types are supported by the ray-caster camera.""" - # check if there is any intersection in unsupported types - # reason: these use np structured data types which we can't yet convert to torch tensor - common_elements = set(cfg.data_types) & Camera.UNSUPPORTED_TYPES - if common_elements: - # provide alternative fast counterparts - fast_common_elements = [] - for item in common_elements: - if "instance_segmentation" in item or "instance_id_segmentation" in item: - fast_common_elements.append(item + "_fast") - # raise error - raise ValueError( - f"TiledCamera class does not support the following sensor types: {common_elements}." - "\n\tThis is because these sensor types output numpy structured data types which" - "can't be converted to torch tensors easily." - "\n\tHint: If you need to work with these sensor types, we recommend using their fast counterparts." 
- f"\n\t\tFast counterparts: {fast_common_elements}" - ) - - def _create_buffers(self): - """Create buffers for storing data.""" - # create the data object - # -- pose of the cameras - self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) - self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) - self._update_poses(self._ALL_INDICES) - # -- intrinsic matrix - self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) - self._update_intrinsic_matrices(self._ALL_INDICES) - self._data.image_shape = self.image_shape - # -- output data - data_dict = dict() - if "rgba" in self.cfg.data_types or "rgb" in self.cfg.data_types: - data_dict["rgba"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - if "rgb" in self.cfg.data_types: - # RGB is the first 3 channels of RGBA - data_dict["rgb"] = data_dict["rgba"][..., :3] - if "distance_to_image_plane" in self.cfg.data_types: - data_dict["distance_to_image_plane"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "depth" in self.cfg.data_types: - data_dict["depth"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "distance_to_camera" in self.cfg.data_types: - data_dict["distance_to_camera"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 - ).contiguous() - if "normals" in self.cfg.data_types: - data_dict["normals"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.float32 - ).contiguous() - if "motion_vectors" in self.cfg.data_types: - data_dict["motion_vectors"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 2), device=self.device, dtype=torch.float32 - ).contiguous() - if "semantic_segmentation" in self.cfg.data_types: - if self.cfg.colorize_semantic_segmentation: - data_dict["semantic_segmentation"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["semantic_segmentation"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - if "instance_segmentation_fast" in self.cfg.data_types: - if self.cfg.colorize_instance_segmentation: - data_dict["instance_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["instance_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - if "instance_id_segmentation_fast" in self.cfg.data_types: - if self.cfg.colorize_instance_id_segmentation: - data_dict["instance_id_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 - ).contiguous() - else: - data_dict["instance_id_segmentation_fast"] = torch.zeros( - (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 - ).contiguous() - - self._data.output = data_dict - self._data.info = dict() - - def _tiled_image_shape(self) -> tuple[int, int]: - """Returns a tuple containing the dimension of the tiled image.""" - cols, rows = 
self._tiling_grid_shape() - return (self.cfg.width * cols, self.cfg.height * rows) - - def _tiling_grid_shape(self) -> tuple[int, int]: - """Returns a tuple containing the tiling grid dimension.""" - cols = math.ceil(math.sqrt(self._view.count)) - rows = math.ceil(self._view.count / cols) - return (cols, rows) - - def _create_annotator_data(self): - # we do not need to create annotator data for the tiled camera sensor - raise RuntimeError("This function should not be called for the tiled camera sensor.") - - def _process_annotator_output(self, name: str, output: Any) -> tuple[torch.tensor, dict | None]: - # we do not need to process annotator output for the tiled camera sensor - raise RuntimeError("This function should not be called for the tiled camera sensor.") - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py deleted file mode 100644 index 2241a0648fd..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/tiled_camera_cfg.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from isaaclab.utils import configclass - -from .camera_cfg import CameraCfg -from .tiled_camera import TiledCamera - - -@configclass -class TiledCameraCfg(CameraCfg): - """Configuration for a tiled rendering-based camera sensor.""" - - class_type: type = TiledCamera diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py b/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py deleted file mode 100644 index f9db81551b4..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/camera/utils.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Helper functions to project between pointcloud and depth images.""" - -# needed to import for allowing type-hinting: torch.device | str | None -from __future__ import annotations - -from collections.abc import Sequence - -import numpy as np -import torch -import warp as wp - -import isaaclab.utils.math as math_utils -from isaaclab.utils.array import TensorData, convert_to_torch - -""" -Depth <-> Pointcloud conversions. -""" - - -def transform_points( - points: TensorData, - position: Sequence[float] | None = None, - orientation: Sequence[float] | None = None, - device: torch.device | str | None = None, -) -> np.ndarray | torch.Tensor: - r"""Transform input points in a given frame to a target frame. - - This function transform points from a source frame to a target frame. The transformation is defined by the - position ``t`` and orientation ``R`` of the target frame in the source frame. - - .. math:: - p_{target} = R_{target} \times p_{source} + t_{target} - - If either the inputs `position` and `orientation` are None, the corresponding transformation is not applied. - - Args: - points: a tensor of shape (p, 3) or (n, p, 3) comprising of 3d points in source frame. 
- position: The position of source frame in target frame. Defaults to None. - orientation: The orientation (w, x, y, z) of source frame in target frame. - Defaults to None. - device: The device for torch where the computation - should be executed. Defaults to None, i.e. takes the device that matches the depth image. - - Returns: - A tensor of shape (N, 3) comprising of 3D points in target frame. - If the input is a numpy array, the output is a numpy array. Otherwise, it is a torch tensor. - """ - # check if numpy - is_numpy = isinstance(points, np.ndarray) - # decide device - if device is None and is_numpy: - device = torch.device("cpu") - # convert to torch - points = convert_to_torch(points, dtype=torch.float32, device=device) - # update the device with the device of the depth image - # note: this is needed since warp does not provide the device directly - device = points.device - # apply rotation - if orientation is not None: - orientation = convert_to_torch(orientation, dtype=torch.float32, device=device) - # apply translation - if position is not None: - position = convert_to_torch(position, dtype=torch.float32, device=device) - # apply transformation - points = math_utils.transform_points(points, position, orientation) - - # return everything according to input type - if is_numpy: - return points.detach().cpu().numpy() - else: - return points - - -def create_pointcloud_from_depth( - intrinsic_matrix: np.ndarray | torch.Tensor | wp.array, - depth: np.ndarray | torch.Tensor | wp.array, - keep_invalid: bool = False, - position: Sequence[float] | None = None, - orientation: Sequence[float] | None = None, - device: torch.device | str | None = None, -) -> np.ndarray | torch.Tensor: - r"""Creates pointcloud from input depth image and camera intrinsic matrix. - - This function creates a pointcloud from a depth image and camera intrinsic matrix. The pointcloud is - computed using the following equation: - - .. math:: - p_{camera} = K^{-1} \times [u, v, 1]^T \times d - - where :math:`K` is the camera intrinsic matrix, :math:`u` and :math:`v` are the pixel coordinates and - :math:`d` is the depth value at the pixel. - - Additionally, the pointcloud can be transformed from the camera frame to a target frame by providing - the position ``t`` and orientation ``R`` of the camera in the target frame: - - .. math:: - p_{target} = R_{target} \times p_{camera} + t_{target} - - Args: - intrinsic_matrix: A (3, 3) array providing camera's calibration matrix. - depth: An array of shape (H, W) with values encoding the depth measurement. - keep_invalid: Whether to keep invalid points in the cloud or not. Invalid points - correspond to pixels with depth values 0.0 or NaN. Defaults to False. - position: The position of the camera in a target frame. Defaults to None. - orientation: The orientation (w, x, y, z) of the camera in a target frame. Defaults to None. - device: The device for torch where the computation should be executed. - Defaults to None, i.e. takes the device that matches the depth image. - - Returns: - An array/tensor of shape (N, 3) comprising of 3D coordinates of points. - The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray - is returned. - """ - # We use PyTorch here for matrix multiplication since it is compiled with Intel MKL while numpy - # by default uses OpenBLAS. With PyTorch (CPU), we could process a depth image of size (480, 640) - # in 0.0051 secs, while with numpy it took 0.0292 secs. 
-
-    # check if the input depth is a numpy array
-    is_numpy = isinstance(depth, np.ndarray)
-    # decide device
-    if device is None and is_numpy:
-        device = torch.device("cpu")
-    # convert depth to torch tensor
-    depth = convert_to_torch(depth, dtype=torch.float32, device=device)
-    # update the device with the device of the depth image
-    # note: this is needed since warp does not provide the device directly
-    device = depth.device
-    # convert inputs to torch tensors
-    intrinsic_matrix = convert_to_torch(intrinsic_matrix, dtype=torch.float32, device=device)
-    if position is not None:
-        position = convert_to_torch(position, dtype=torch.float32, device=device)
-    if orientation is not None:
-        orientation = convert_to_torch(orientation, dtype=torch.float32, device=device)
-    # compute pointcloud
-    depth_cloud = math_utils.unproject_depth(depth, intrinsic_matrix)
-    # convert 3D points to world frame
-    depth_cloud = math_utils.transform_points(depth_cloud, position, orientation)
-
-    # keep only valid entries if flag is set
-    if not keep_invalid:
-        pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(depth_cloud), ~torch.isinf(depth_cloud)), dim=1)
-        depth_cloud = depth_cloud[pts_idx_to_keep, ...]
-
-    # return everything according to input type
-    if is_numpy:
-        return depth_cloud.detach().cpu().numpy()
-    else:
-        return depth_cloud
-
-
-def create_pointcloud_from_rgbd(
-    intrinsic_matrix: torch.Tensor | np.ndarray | wp.array,
-    depth: torch.Tensor | np.ndarray | wp.array,
-    rgb: torch.Tensor | wp.array | np.ndarray | tuple[float, float, float] = None,
-    normalize_rgb: bool = False,
-    position: Sequence[float] | None = None,
-    orientation: Sequence[float] | None = None,
-    device: torch.device | str | None = None,
-    num_channels: int = 3,
-) -> tuple[torch.Tensor, torch.Tensor] | tuple[np.ndarray, np.ndarray]:
-    """Creates pointcloud from input depth image and camera intrinsic matrix.
-
-    This function provides the same functionality as :meth:`create_pointcloud_from_depth` but also allows
-    providing the RGB values for each point.
-
-    The ``rgb`` attribute is used to resolve the corresponding point's color:
-
-    - If a ``np.array``/``wp.array``/``torch.tensor`` of shape (H, W, 3), then the corresponding channels
-      encode the RGB values.
-    - If a tuple, then the point cloud has a single color specified by the values (r, g, b).
-    - If None, then the default color is black, i.e. (0, 0, 0).
-
-    If the input ``normalize_rgb`` is set to :obj:`True`, then the RGB values are normalized to be in the range [0, 1].
-
-    Args:
-        intrinsic_matrix: A (3, 3) array/tensor providing camera's calibration matrix.
-        depth: An array/tensor of shape (H, W) with values encoding the depth measurement.
-        rgb: Color for generated point cloud. Defaults to None.
-        normalize_rgb: Whether to normalize input rgb. Defaults to False.
-        position: The position of the camera in a target frame. Defaults to None.
-        orientation: The orientation `(w, x, y, z)` of the camera in a target frame. Defaults to None.
-        device: The device for torch where the computation should be executed. Defaults to None, in which case
-            it takes the device that matches the depth image.
-        num_channels: Number of channels in RGB pointcloud. Defaults to 3.
-
-    Returns:
-        A tuple of (N, 3) arrays or tensors containing the 3D coordinates of points and their RGB color respectively.
-        The returned datatype is torch if input depth is of type torch.tensor or wp.array. Otherwise, a np.ndarray
-        is returned.
- - Raises: - ValueError: When rgb image is a numpy array but not of shape (H, W, 3) or (H, W, 4). - """ - # check valid inputs - if rgb is not None and not isinstance(rgb, tuple): - if len(rgb.shape) == 3: - if rgb.shape[2] not in [3, 4]: - raise ValueError(f"Input rgb image of invalid shape: {rgb.shape} != (H, W, 3) or (H, W, 4).") - else: - raise ValueError(f"Input rgb image not three-dimensional. Received shape: {rgb.shape}.") - if num_channels not in [3, 4]: - raise ValueError(f"Invalid number of channels: {num_channels} != 3 or 4.") - - # check if input depth is numpy array - is_numpy = isinstance(depth, np.ndarray) - # decide device - if device is None and is_numpy: - device = torch.device("cpu") - # convert depth to torch tensor - if is_numpy: - depth = torch.from_numpy(depth).to(device=device) - # retrieve XYZ pointcloud - points_xyz = create_pointcloud_from_depth(intrinsic_matrix, depth, True, position, orientation, device=device) - - # get image height and width - im_height, im_width = depth.shape[:2] - # total number of points - num_points = im_height * im_width - # extract color value - if rgb is not None: - if isinstance(rgb, (np.ndarray, torch.Tensor, wp.array)): - # copy numpy array to preserve - rgb = convert_to_torch(rgb, device=device, dtype=torch.float32) - rgb = rgb[:, :, :3] - # convert the matrix to (W, H, 3) from (H, W, 3) since depth processing - # is done in the order (u, v) where u: (0, W-1) and v: (0 - H-1) - points_rgb = rgb.permute(1, 0, 2).reshape(-1, 3) - elif isinstance(rgb, (tuple, list)): - # same color for all points - points_rgb = torch.Tensor((rgb,) * num_points, device=device, dtype=torch.uint8) - else: - # default color is white - points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) - else: - points_rgb = torch.Tensor(((0, 0, 0),) * num_points, device=device, dtype=torch.uint8) - # normalize color values - if normalize_rgb: - points_rgb = points_rgb.float() / 255 - - # remove invalid points - pts_idx_to_keep = torch.all(torch.logical_and(~torch.isnan(points_xyz), ~torch.isinf(points_xyz)), dim=1) - points_rgb = points_rgb[pts_idx_to_keep, ...] - points_xyz = points_xyz[pts_idx_to_keep, ...] - - # add additional channels if required - if num_channels == 4: - points_rgb = torch.nn.functional.pad(points_rgb, (0, 1), mode="constant", value=1.0) - - # return everything according to input type - if is_numpy: - return points_xyz.cpu().numpy(), points_rgb.cpu().numpy() - else: - return points_xyz, points_rgb - - -def save_images_to_file(images: torch.Tensor, file_path: str): - """Save images to file. - - Args: - images: A tensor of shape (N, H, W, C) containing the images. - file_path: The path to save the images to. - """ - from torchvision.utils import make_grid, save_image - - save_image( - make_grid(torch.swapaxes(images.unsqueeze(1), 1, -1).squeeze(-1), nrow=round(images.shape[0] ** 0.5)), file_path - ) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py deleted file mode 100644 index 94b402d41a3..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-module for rigid contact sensor.""" - -from .contact_sensor import ContactSensor -from .contact_sensor_cfg import ContactSensorCfg -from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py deleted file mode 100644 index 2a85a2661f6..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py +++ /dev/null @@ -1,539 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -# Ignore optional memory usage warning globally -# pyright: reportOptionalSubscript=false - -from __future__ import annotations - -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch - -import carb -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import convert_quat - -from ..sensor_base import SensorBase -from .contact_sensor_data import ContactSensorData - -if TYPE_CHECKING: - from .contact_sensor_cfg import ContactSensorCfg - - -class ContactSensor(SensorBase): - """A contact reporting sensor. - - The contact sensor reports the normal contact forces on a rigid body in the world frame. - It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. - - To enable the contact reporter on a rigid body, please make sure to enable the - :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your - asset spawner configuration. This will enable the contact reporter on all the rigid bodies - in the asset. - - The sensor can be configured to report the contact forces on a set of bodies with a given - filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful - when you want to report the contact forces between the sensor bodies and a specific set of - bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. - Please check the documentation on `RigidContact`_ for more details. - - The reporting of the filtered contact forces is only possible as one-to-many. This means that only one - sensor body in an environment can be filtered against multiple bodies in that environment. If you need to - filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor - body. - - As an example, suppose you want to report the contact forces for all the feet of a robot against an object - exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and - :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` - respectively will not work. Instead, you need to create a separate sensor for each foot and filter - it against the object. - - .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html - .. 
_RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView - """ - - cfg: ContactSensorCfg - """The configuration parameters.""" - - def __init__(self, cfg: ContactSensorCfg): - """Initializes the contact sensor object. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - - # Enable contact processing - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/disableContactProcessing", False) - - # Create empty variables for storing output data - self._data: ContactSensorData = ContactSensorData() - # initialize self._body_physx_view for running in extension mode - self._body_physx_view = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Contact sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self.body_physx_view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of bodies : {self.num_bodies}\n" - f"\tbody names : {self.body_names}\n" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self.body_physx_view.count - - @property - def data(self) -> ContactSensorData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Number of bodies with contact sensors attached.""" - return self._num_bodies - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies with contact sensors attached.""" - prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def body_physx_view(self) -> physx.RigidBodyView: - """View for the rigid bodies captured (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._body_physx_view - - @property - def contact_physx_view(self) -> physx.RigidContactView: - """Contact reporter view for the bodies (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._contact_physx_view - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.net_forces_w[env_ids] = 0.0 - self._data.net_forces_w_history[env_ids] = 0.0 - # reset force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - self._data.force_matrix_w[env_ids] = 0.0 - self._data.force_matrix_w_history[env_ids] = 0.0 - # reset the current air time - if self.cfg.track_air_time: - self._data.current_air_time[env_ids] = 0.0 - self._data.last_air_time[env_ids] = 0.0 - self._data.current_contact_time[env_ids] = 0.0 - self._data.last_contact_time[env_ids] = 0.0 - # reset contact positions - if self.cfg.track_contact_points: - self._data.contact_pos_w[env_ids, :] = torch.nan - # reset friction forces - if self.cfg.track_friction_forces: - self._data.friction_forces_w[env_ids, :] = 0.0 - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. 
-            preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False.
-
-        Returns:
-            A tuple of lists containing the body indices and names.
-        """
-        return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order)
-
-    def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
-        """Checks which bodies have established contact within the last :attr:`dt` seconds.
-
-        This function checks if the bodies have established contact within the last :attr:`dt` seconds
-        by comparing the current contact time with the given time period. If the contact time is less
-        than the given time period, then the bodies are considered to be in contact.
-
-        Note:
-            The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other
-            words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true
-            if the sensor is updated by the physics or the environment stepping time-step and the sensor
-            is read by the environment stepping time-step.
-
-        Args:
-            dt: The time period since the contact was established.
-            abs_tol: The absolute tolerance for the comparison.
-
-        Returns:
-            A boolean tensor indicating the bodies that have established contact within the last
-            :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the
-            number of bodies in each sensor.
-
-        Raises:
-            RuntimeError: If the sensor is not configured to track contact time.
-        """
-        # check if the sensor is configured to track contact time
-        if not self.cfg.track_air_time:
-            raise RuntimeError(
-                "The contact sensor is not configured to track contact time."
-                " Please enable the 'track_air_time' in the sensor configuration."
-            )
-        # check if the bodies are in contact
-        currently_in_contact = self.data.current_contact_time > 0.0
-        less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol)
-        return currently_in_contact * less_than_dt_in_contact
-
-    def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
-        """Checks which bodies have broken contact within the last :attr:`dt` seconds.
-
-        This function checks if the bodies have broken contact within the last :attr:`dt` seconds
-        by comparing the current air time with the given time period. If the air time is less
-        than the given time period, then the bodies are considered to not be in contact.
-
-        Note:
-            It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words,
-            :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if
-            the sensor is updated by the physics or the environment stepping time-step and the sensor
-            is read by the environment stepping time-step.
-
-        Args:
-            dt: The time period since the contact was broken.
-            abs_tol: The absolute tolerance for the comparison.
-
-        Returns:
-            A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds.
-            Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
-
-        Raises:
-            RuntimeError: If the sensor is not configured to track contact time.
-        """
-        # check if the sensor is configured to track contact time
-        if not self.cfg.track_air_time:
-            raise RuntimeError(
-                "The contact sensor is not configured to track contact time."
-                " Please enable the 'track_air_time' in the sensor configuration."
- ) - # check if the sensor is configured to track contact time - currently_detached = self.data.current_air_time > 0.0 - less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) - return currently_detached * less_than_dt_detached - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check that only rigid bodies are selected - leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] - template_prim_path = self._parent_prims[0].GetPath().pathString - body_names = list() - for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): - # check if prim has contact reporter API - if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - prim_path = prim.GetPath().pathString - body_names.append(prim_path.rsplit("/", 1)[-1]) - # check that there is at least one body with contact reporter API - if not body_names: - raise RuntimeError( - f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." - "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." - ) - - # construct regex expression for the body names - body_names_regex = r"(" + "|".join(body_names) + r")" - body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" - # convert regex expressions to glob expressions for PhysX - body_names_glob = body_names_regex.replace(".*", "*") - filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] - - # create a rigid prim view for the sensor - self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) - self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( - body_names_glob, - filter_patterns=filter_prim_paths_glob, - max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, - ) - # resolve the true count of bodies - self._num_bodies = self.body_physx_view.count // self._num_envs - # check that contact reporter succeeded - if self._num_bodies != len(body_names): - raise RuntimeError( - "Failed to initialize contact reporter for specified bodies." - f"\n\tInput prim path : {self.cfg.prim_path}" - f"\n\tResolved prim paths: {body_names_regex}" - ) - - # prepare data buffers - self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - # optional buffers - # -- history of net forces - if self.cfg.history_length > 0: - self._data.net_forces_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device - ) - else: - self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) - # -- pose of sensor origins - if self.cfg.track_pose: - self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) - - # check if filter paths are valid - if self.cfg.track_contact_points or self.cfg.track_friction_forces: - if len(self.cfg.filter_prim_paths_expr) == 0: - raise ValueError( - "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - if self.cfg.max_contact_data_count_per_prim < 1: - raise ValueError( - f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. 
" - "Please set it to a value greater than 0 to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - - # -- position of contact points - if self.cfg.track_contact_points: - self._data.contact_pos_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - torch.nan, - device=self._device, - ) - # -- friction forces at contact points - if self.cfg.track_friction_forces: - self._data.friction_forces_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - 0.0, - device=self._device, - ) - # -- air/contact time between contacts - if self.cfg.track_air_time: - self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) - # force matrix history: (num_envs, history_length, num_bodies, num_filter_shapes, 3) - if len(self.cfg.filter_prim_paths_expr) != 0: - num_filters = self.contact_physx_view.filter_count - self._data.force_matrix_w = torch.zeros( - self._num_envs, self._num_bodies, num_filters, 3, device=self._device - ) - if self.cfg.history_length > 0: - self._data.force_matrix_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, num_filters, 3, device=self._device - ) - else: - self._data.force_matrix_w_history = self._data.force_matrix_w.unsqueeze(1) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - - # obtain the contact forces - # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). - # This isn't the most efficient way to do this, but it's the easiest to implement. 
- net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) - self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] - # update contact force history - if self.cfg.history_length > 0: - self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) - self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] - - # obtain the contact force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) - num_filters = self.contact_physx_view.filter_count - # acquire and shape the force matrix - force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) - force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) - self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] - if self.cfg.history_length > 0: - self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) - self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] - - # obtain the pose of the sensor origin - if self.cfg.track_pose: - pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] - pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") - self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) - - # obtain contact points - if self.cfg.track_contact_points: - _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) - ) - self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( - buffer_contact_points, buffer_count, buffer_start_indices - )[env_ids] - - # obtain friction forces - if self.cfg.track_friction_forces: - friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( - dt=self._sim_physics_dt - ) - self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( - friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 - )[env_ids] - - # obtain the air time - if self.cfg.track_air_time: - # -- time elapsed since last update - # since this function is called every frame, we can use the difference to get the elapsed time - elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] - # -- check contact state of bodies - is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold - is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact - is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact - # -- update the last contact time if body has just become in contact - self._data.last_air_time[env_ids] = torch.where( - is_first_contact, - self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_air_time[env_ids], - ) - # -- increment time for bodies that are not in contact - self._data.current_air_time[env_ids] = torch.where( - ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - # -- update the last contact time if body has just detached - self._data.last_contact_time[env_ids] = torch.where( - is_first_detached, - self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_contact_time[env_ids], - ) - # -- increment time for bodies that are in contact - self._data.current_contact_time[env_ids] = 
torch.where( - is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - - def _unpack_contact_buffer_data( - self, - contact_data: torch.Tensor, - buffer_count: torch.Tensor, - buffer_start_indices: torch.Tensor, - avg: bool = True, - default: float = float("nan"), - ) -> torch.Tensor: - """ - Unpacks and aggregates contact data for each (env, body, filter) group. - - This function vectorizes the following nested loop: - - for i in range(self._num_bodies * self._num_envs): - for j in range(self.contact_physx_view.filter_count): - start_index_ij = buffer_start_indices[i, j] - count_ij = buffer_count[i, j] - self._contact_position_aggregate_buffer[i, j, :] = torch.mean( - contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 - ) - - For more details, see the `RigidContactView.get_contact_data() documentation `_. - - Args: - contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). - buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). - buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). - avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. - default: Default value to use for groups with zero contacts. Defaults to NaN. - - Returns: - Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). - """ - counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) - n_rows, total = counts.numel(), int(counts.sum()) - agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) - if total > 0: - row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) - - block_starts = counts.cumsum(0) - counts - deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) - flat_idx = starts[row_ids] + deltas - - pts = contact_data.index_select(0, flat_idx) - agg = agg.zero_().index_add_(0, row_ids, pts) - agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg - agg[counts == 0] = default - - return agg.view(self._num_envs * self.num_bodies, -1, 3).view( - self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 - ) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. 
not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "contact_visualizer"): - self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.contact_visualizer.set_visibility(True) - else: - if hasattr(self, "contact_visualizer"): - self.contact_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self.body_physx_view is None: - return - # marker indices - # 0: contact, 1: no contact - net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) - marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) - # check if prim is visualized - if self.cfg.track_pose: - frame_origins: torch.Tensor = self._data.pos_w - else: - pose = self.body_physx_view.get_transforms() - frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] - # visualize - self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._body_physx_view = None - self._contact_physx_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py deleted file mode 100644 index c811a7ca63d..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_cfg.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from isaaclab.markers import VisualizationMarkersCfg -from isaaclab.markers.config import CONTACT_SENSOR_MARKER_CFG -from isaaclab.utils import configclass - -from ..sensor_base_cfg import SensorBaseCfg -from .contact_sensor import ContactSensor - - -@configclass -class ContactSensorCfg(SensorBaseCfg): - """Configuration for the contact sensor.""" - - class_type: type = ContactSensor - - track_pose: bool = False - """Whether to track the pose of the sensor's origin. Defaults to False.""" - - track_contact_points: bool = False - """Whether to track the contact point locations. Defaults to False.""" - - track_friction_forces: bool = False - """Whether to track the friction forces at the contact points. Defaults to False.""" - - max_contact_data_count_per_prim: int = 4 - """The maximum number of contacts across all batches of the sensor to keep track of. Default is 4. - - This parameter sets the total maximum counts of the simulation across all bodies and environments. The total number - of contacts allowed is max_contact_data_count_per_prim*num_envs*num_sensor_bodies. - - .. note:: - - If the environment is very contact rich it is suggested to increase this parameter to avoid out of bounds memory - errors and loss of contact data leading to inaccurate measurements. - - """ - - track_air_time: bool = False - """Whether to track the air/contact time of the bodies (time between contacts). 
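To make the interplay of these flags concrete, a hypothetical configuration for foot-contact sensing might look as follows. The import path assumes the class stays re-exported from isaaclab.sensors, and the prim paths are placeholders:

    from isaaclab.sensors import ContactSensorCfg

    foot_contact_cfg = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT",            # placeholder body expression
        update_period=0.0,                                    # update every physics step
        history_length=3,                                     # keep a short force history
        track_air_time=True,                                  # enables air/contact time buffers
        force_threshold=1.0,                                  # bodies count as in contact above 1 N
        filter_prim_paths_expr=["{ENV_REGEX_NS}/Object"],     # optional per-object force matrix
    )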
Defaults to False.""" - - force_threshold: float = 1.0 - """The threshold on the norm of the contact force that determines whether two bodies are in collision or not. - - This value is only used for tracking the mode duration (the time in contact or in air), - if :attr:`track_air_time` is True. - """ - - filter_prim_paths_expr: list[str] = list() - """The list of primitive paths (or expressions) to filter contacts with. Defaults to an empty list, in which case - no filtering is applied. - - The contact sensor allows reporting contacts between the primitive specified with :attr:`prim_path` and - other primitives in the scene. For instance, in a scene containing a robot, a ground plane and an object, - you can obtain individual contact reports of the base of the robot with the ground plane and the object. - - .. note:: - The expression in the list can contain the environment namespace regex ``{ENV_REGEX_NS}`` which - will be replaced with the environment namespace. - - Example: ``{ENV_REGEX_NS}/Object`` will be replaced with ``/World/envs/env_.*/Object``. - - .. attention:: - The reporting of filtered contacts only works when the sensor primitive :attr:`prim_path` corresponds to a - single primitive in that environment. If the sensor primitive corresponds to multiple primitives, the - filtering will not work as expected. Please check :class:`~isaaclab.sensors.contact_sensor.ContactSensor` - for more details. - If track_contact_points is true, then filter_prim_paths_expr cannot be an empty list! - """ - - visualizer_cfg: VisualizationMarkersCfg = CONTACT_SENSOR_MARKER_CFG.replace(prim_path="/Visuals/ContactSensor") - """The configuration object for the visualization markers. Defaults to CONTACT_SENSOR_MARKER_CFG. - - .. note:: - This attribute is only used when debug visualization is enabled. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py deleted file mode 100644 index fd6c15ebe96..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -# needed to import for allowing type-hinting: torch.Tensor | None -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ContactSensorData: - """Data container for the contact reporting sensor.""" - - pos_w: torch.Tensor | None = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - - """ - - contact_pos_w: torch.Tensor | None = None - """Average of the positions of contact points between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_contact_points` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_contact_points` is True, a ValueError will be raised if: - - * If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. 
- * If the :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - will not be calculated. - - """ - - friction_forces_w: torch.Tensor | None = None - """Sum of the friction forces between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_friction_forces` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_friction_forces` is True, a ValueError will be raised if: - - * The :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * The :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - - """ - - quat_w: torch.Tensor | None = None - """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. - - Shape is (N, 4), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - """ - - net_forces_w: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - net_forces_w_history: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length - and B is the number of bodies in each sensor. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - force_matrix_w: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - force_matrix_w_history: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, T, B, M, 3), where N is the number of sensors, T is the configured history length, - B is number of bodies in each sensor and M is the number of filtered bodies. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - last_air_time: torch.Tensor | None = None - """Time spent (in s) in the air before the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - current_air_time: torch.Tensor | None = None - """Time spent (in s) in the air since the last detach. 
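These air/contact-time buffers are typically consumed by reward terms. A hedged sketch of a feet-air-time style reward that only uses the fields documented here (the function name and threshold are illustrative, and track_air_time is assumed to be enabled):

    import torch

    def feet_air_time_reward(data, step_dt: float, threshold: float = 0.5) -> torch.Tensor:
        """Pays out (last_air_time - threshold) for each body that just touched down."""
        # a body has just made contact if its contact time is positive but at most one step old
        first_contact = (data.current_contact_time > 0.0) & (data.current_contact_time <= step_dt)
        return torch.sum((data.last_air_time - threshold) * first_contact, dim=-1)  # shape (N,)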
- - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - last_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact before the last detach. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - current_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact since the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py deleted file mode 100644 index d5db853e8cc..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-module for frame transformer sensor.""" - -from .frame_transformer import FrameTransformer -from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg -from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py deleted file mode 100644 index ed83392b3aa..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -import re -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch - -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import ( - combine_frame_transforms, - convert_quat, - is_identity_pose, - normalize, - quat_from_angle_axis, - subtract_frame_transforms, -) - -from ..sensor_base import SensorBase -from .frame_transformer_data import FrameTransformerData - -if TYPE_CHECKING: - from .frame_transformer_cfg import FrameTransformerCfg - -# import logger -logger = logging.getLogger(__name__) - - -class FrameTransformer(SensorBase): - """A sensor for reporting frame transforms. - - This class provides an interface for reporting the transform of one or more frames (target frames) - with respect to another frame (source frame). The source frame is specified by the user as a prim path - (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of - prim paths (:attr:`FrameTransformerCfg.target_frames`). - - The source frame and target frames are assumed to be rigid bodies. 
The transform of the target frames - with respect to the source frame is computed by first extracting the transform of the source frame - and target frames from the physics engine and then computing the relative transform between the two. - - Additionally, the user can specify an offset for the source frame and each target frame. This is useful - for specifying the transform of the desired frame with respect to the body's center of mass, for instance. - - A common example of using this sensor is to track the position and orientation of the end effector of a - robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the - manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is - typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the - manipulator. - - """ - - cfg: FrameTransformerCfg - """The configuration parameters.""" - - def __init__(self, cfg: FrameTransformerCfg): - """Initializes the frame transformer object. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data: FrameTransformerData = FrameTransformerData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"FrameTransformer @ '{self.cfg.prim_path}': \n" - f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" - f"\tnumber of envs: {self._num_envs}\n" - f"\tsource body frame: {self._source_frame_body_name}\n" - f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> FrameTransformerData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Returns the number of target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`len(data.target_frame_names)` to access the number of target frames. - """ - return len(self._target_frame_body_names) - - @property - def body_names(self) -> list[str]: - """Returns the names of the target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`data.target_frame_names` to access the target frame names. - """ - return self._target_frame_body_names - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = ... - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) - - """ - Implementation. 
- """ - - def _initialize_impl(self): - super()._initialize_impl() - - # resolve source frame offset - source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) - source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) - # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is - # not the identity quaternion for efficiency in _update_buffer_impl - self._apply_source_frame_offset = True - # Handle source frame offsets - if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): - logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") - self._apply_source_frame_offset = False - else: - logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") - # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) - self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) - self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) - - # Keep track of mapping from the rigid body name to the desired frames and prim path, - # as there may be multiple frames based upon the same body name and we don't want to - # create unnecessary views. - body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} - # The offsets associated with each target frame - target_offsets: dict[str, dict[str, torch.Tensor]] = {} - # The frames whose offsets are not identity - non_identity_offset_frames: list[str] = [] - - # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the - # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl - self._apply_target_frame_offset = False - - # Need to keep track of whether the source frame is also a target frame - self._source_is_also_target_frame = False - - # Collect all target frames, their associated body prim paths and their offsets so that we can extract - # the prim, check that it has the appropriate rigid body API in a single loop. - # First element is None because user can't specify source frame name - frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] - frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] - # First element is None because source frame offset is handled separately - frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] - frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) - for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): - # Find correct prim - matching_prims = sim_utils.find_matching_prims(prim_path) - if len(matching_prims) == 0: - raise ValueError( - f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." - " No matching prims were found." - ) - for prim in matching_prims: - # Get the prim path of the matching prim - matching_prim_path = prim.GetPath().pathString - # Check if it is a rigid prim - if not prim.HasAPI(UsdPhysics.RigidBodyAPI): - raise ValueError( - f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" - " rigid body. The class only supports transformations between rigid bodies." 
- ) - - # Get the name of the body: use relative prim path for unique identification - body_name = self._get_relative_body_path(matching_prim_path) - # Use leaf name of prim path if frame name isn't specified by user - frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] - - # Keep track of which frames are associated with which bodies - if body_name in body_names_to_frames: - body_names_to_frames[body_name]["frames"].add(frame_name) - - # This is a corner case where the source frame is also a target frame - if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": - self._source_is_also_target_frame = True - - else: - # Store the first matching prim path and the type of frame - body_names_to_frames[body_name] = { - "frames": {frame_name}, - "prim_path": matching_prim_path, - "type": frame_type, - } - - if offset is not None: - offset_pos = torch.tensor(offset.pos, device=self.device) - offset_quat = torch.tensor(offset.rot, device=self.device) - # Check if we need to apply offsets (optimized code path in _update_buffer_impl) - if not is_identity_pose(offset_pos, offset_quat): - non_identity_offset_frames.append(frame_name) - self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} - - if not self._apply_target_frame_offset: - logger.info( - f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" - f" are identity: {frames[1:]}" - ) - else: - logger.info( - f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" - f" {non_identity_offset_frames}" - ) - - # The names of bodies that RigidPrim will be tracking to later extract transforms from - tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] - tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] - - body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # Create a prim view for all frames and initialize it - # order of transforms coming out of view will be source frame followed by target frame(s) - self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) - - # Determine the order in which regex evaluated body names so we can later index into frame transforms - # by frame name correctly - all_prim_paths = self._frame_physx_view.prim_paths - - if "env_" in all_prim_paths[0]: - - def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: - """Separates the environment number and prim_path from the item. - - Args: - item: The item to extract the environment number from. Assumes item is of the form - `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. - Returns: - The environment number and the prim_path. - """ - match = re.search(r"env_(\d+)(.*)", item) - return (int(match.group(1)), match.group(2)) - - # Find the indices that would reorganize output to be per environment. 
- # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base - # to come before env_1/Robot/foot so we need to use custom key function - self._per_env_indices = [ - index - for index, _ in sorted( - list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) - ) - ] - - # Only need 0th env as the names and their ordering are the same across environments - sorted_prim_paths = [ - all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] - ] - - else: - # If no environment is present, then the order of the body names is the same as the order of the - # prim paths sorted alphabetically - self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] - sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] - - # -- target frames: use relative prim path for unique identification - self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] - - # -- source frame: use relative prim path for unique identification - self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) - source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) - - # Only remove source frame from tracked bodies if it is not also a target frame - if not self._source_is_also_target_frame: - self._target_frame_body_names.remove(self._source_frame_body_name) - - # Determine indices into all tracked body frames for both source and target frames - all_ids = torch.arange(self._num_envs * len(tracked_body_names)) - self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index - - # If source frame is also a target frame, then the target frame body ids are the same as - # the source frame body ids - if self._source_is_also_target_frame: - self._target_frame_body_ids = all_ids - else: - self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] - - # The name of each of the target frame(s) - either user specified or defaulted to the body name - self._target_frame_names: list[str] = [] - # The position and rotation components of target frame offsets - target_frame_offset_pos = [] - target_frame_offset_quat = [] - # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed - # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations - # when updating sensor in _update_buffers_impl - duplicate_frame_indices = [] - - # Go through each body name and determine the number of duplicates we need for that frame - # and extract the offsets. This is all done to handle the case where multiple frames - # reference the same body, but have different names and/or offsets - for i, body_name in enumerate(self._target_frame_body_names): - for frame in body_names_to_frames[body_name]["frames"]: - # Only need to handle target frames here as source frame is handled separately - if frame in target_offsets: - target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_quat.append(target_offsets[frame]["quat"]) - self._target_frame_names.append(frame) - duplicate_frame_indices.append(i) - - # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes - # [0, 1, 1, 2, 3, 4, 4, 5]. 
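The index expansion described here can be checked in isolation. With three tracked bodies per environment and two environments, the per-environment indices [0, 1, 1, 2] expand to [0, 1, 1, 2, 3, 4, 4, 5] (a standalone sketch with assumed values):

    import torch

    duplicate_frame_indices = torch.tensor([0, 1, 1, 2])    # per-environment body indices
    num_target_body_frames, num_envs = 3, 2
    expanded = torch.cat(
        [duplicate_frame_indices + num_target_body_frames * env for env in range(num_envs)]
    )
    # expanded -> tensor([0, 1, 1, 2, 3, 4, 4, 5])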
Again, this is a optimization to make _update_buffer_impl more efficient - duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - if self._source_is_also_target_frame: - num_target_body_frames = len(tracked_body_names) - else: - num_target_body_frames = len(tracked_body_names) - 1 - - self._duplicate_frame_indices = torch.cat( - [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] - ) - - # Target frame offsets are only applied if at least one of the offsets are non-identity - if self._apply_target_frame_offset: - # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) - self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) - - # fill the data buffer - self._data.target_frame_names = self._target_frame_names - self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) - self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) - self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) - self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) - self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = ... - - # Extract transforms from view - shape is: - # (the total number of source and target body frames being tracked * self._num_envs, 7) - transforms = self._frame_physx_view.get_transforms() - - # Reorder the transforms to be per environment as is expected of SensorData - transforms = transforms[self._per_env_indices] - - # Convert quaternions as PhysX uses xyzw form - transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") - - # Process source frame transform - source_frames = transforms[self._source_frame_body_ids] - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_source_frame_offset: - source_pos_w, source_quat_w = combine_frame_transforms( - source_frames[:, :3], - source_frames[:, 3:], - self._source_frame_offset_pos, - self._source_frame_offset_quat, - ) - else: - source_pos_w = source_frames[:, :3] - source_quat_w = source_frames[:, 3:] - - # Process target frame transforms - target_frames = transforms[self._target_frame_body_ids] - duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] - duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] - - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_target_frame_offset: - target_pos_w, target_quat_w = combine_frame_transforms( - duplicated_target_frame_pos_w, - duplicated_target_frame_quat_w, - self._target_frame_offset_pos, - self._target_frame_offset_quat, - ) - else: - target_pos_w = duplicated_target_frame_pos_w - target_quat_w = duplicated_target_frame_quat_w - - # Compute the transform of the target frame with respect to the source frame - total_num_frames = len(self._target_frame_names) - target_pos_source, target_quat_source = subtract_frame_transforms( - source_pos_w.unsqueeze(1).expand(-1, total_num_frames, 
-1).reshape(-1, 3), - source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), - target_pos_w, - target_quat_w, - ) - - # Update buffers - # note: The frame names / ordering don't change so no need to update them after initialization - self._data.source_pos_w[:] = source_pos_w.view(-1, 3) - self._data.source_quat_w[:] = source_quat_w.view(-1, 4) - self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) - self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) - self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) - self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - if not hasattr(self, "frame_visualizer"): - self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - - # set their visibility to true - self.frame_visualizer.set_visibility(True) - else: - if hasattr(self, "frame_visualizer"): - self.frame_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # Get the all frames pose - frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) - frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) - - # Get the all connecting lines between frames pose - lines_pos, lines_quat, lines_length = self._get_connecting_lines( - start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), - end_pos=self._data.target_pos_w.view(-1, 3), - ) - - # Initialize default (identity) scales and marker indices for all markers (frames + lines) - marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) - marker_indices = torch.zeros(marker_scales.size(0)) - - # Set the z-scale of line markers to represent their actual length - marker_scales[-lines_length.size(0) :, -1] = lines_length - - # Assign marker config index 1 to line markers - marker_indices[-lines_length.size(0) :] = 1 - - # Update the frame and the connecting line visualizer - self.frame_visualizer.visualize( - translations=torch.cat((frames_pos, lines_pos), dim=0), - orientations=torch.cat((frames_quat, lines_quat), dim=0), - scales=marker_scales, - marker_indices=marker_indices, - ) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._frame_physx_view = None - - """ - Internal helpers. - """ - - def _get_connecting_lines( - self, start_pos: torch.Tensor, end_pos: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Draws connecting lines between frames. - - Given start and end points, this function computes the positions (mid-point), orientations, - and lengths of the connecting lines. - - Args: - start_pos: The start positions of the connecting lines. Shape is (N, 3). - end_pos: The end positions of the connecting lines. Shape is (N, 3). - - Returns: - A tuple containing: - - The positions of each connecting line. Shape is (N, 3). - - The orientations of each connecting line in quaternion. Shape is (N, 4). - - The lengths of each connecting line. Shape is (N,). 
- """ - direction = end_pos - start_pos - lengths = torch.norm(direction, dim=-1) - positions = (start_pos + end_pos) / 2 - - # Get default direction (along z-axis) - default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) - - # Normalize direction vector - direction_norm = normalize(direction) - - # Calculate rotation from default direction to target direction - rotation_axis = torch.linalg.cross(default_direction, direction_norm) - rotation_axis_norm = torch.norm(rotation_axis, dim=-1) - - # Handle case where vectors are parallel - mask = rotation_axis_norm > 1e-6 - rotation_axis = torch.where( - mask.unsqueeze(-1), - normalize(rotation_axis), - torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), - ) - - # Calculate rotation angle - cos_angle = torch.sum(default_direction * direction_norm, dim=-1) - cos_angle = torch.clamp(cos_angle, -1.0, 1.0) - angle = torch.acos(cos_angle) - orientations = quat_from_angle_axis(angle, rotation_axis) - - return positions, orientations, lengths - - @staticmethod - def _get_relative_body_path(prim_path: str) -> str: - """Extract a normalized body path from a prim path. - - Removes the environment instance segment `/envs/env_/` to normalize paths - across multiple environments, while preserving the `/envs/` prefix to - distinguish environment-scoped paths from non-environment paths. - - Examples: - - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' - - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' - - '/World/Robot' -> '/World/Robot' - - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' - - Args: - prim_path: The full prim path. - - Returns: - The prim path with `/envs/env_/` removed, preserving `/envs/`. - """ - pattern = re.compile(r"/envs/env_[^/]+/") - return pattern.sub("/envs/", prim_path) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py deleted file mode 100644 index ca9b528aa1d..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_cfg.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from dataclasses import MISSING - -from isaaclab.markers.config import FRAME_MARKER_CFG, VisualizationMarkersCfg -from isaaclab.utils import configclass - -from ..sensor_base_cfg import SensorBaseCfg -from .frame_transformer import FrameTransformer - - -@configclass -class OffsetCfg: - """The offset pose of one frame relative to another frame.""" - - pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" - - -@configclass -class FrameTransformerCfg(SensorBaseCfg): - """Configuration for the frame transformer sensor.""" - - @configclass - class FrameCfg: - """Information specific to a coordinate frame.""" - - prim_path: str = MISSING - """The prim path corresponding to a rigid body. - - This can be a regex pattern to match multiple prims. For example, "/Robot/.*" - will match all prims under "/Robot". 
- - This means that if the source :attr:`FrameTransformerCfg.prim_path` is "/Robot/base", - and the target :attr:`FrameTransformerCfg.FrameCfg.prim_path` is "/Robot/.*", then - the frame transformer will track the poses of all the prims under "/Robot", - including "/Robot/base" (even though this will result in an identity pose w.r.t. - the source frame). - """ - - name: str | None = None - """User-defined name for the new coordinate frame. Defaults to None. - - If None, then the name is extracted from the leaf of the prim path. - """ - - offset: OffsetCfg = OffsetCfg() - """The pose offset from the parent prim frame.""" - - class_type: type = FrameTransformer - - prim_path: str = MISSING - """The prim path of the body to transform from (source frame).""" - - source_frame_offset: OffsetCfg = OffsetCfg() - """The pose offset from the source prim frame.""" - - target_frames: list[FrameCfg] = MISSING - """A list of the target frames. - - This allows a single FrameTransformer to handle multiple target prims. For example, in a quadruped, - we can use a single FrameTransformer to track each foot's position and orientation in the body - frame using four frame offsets. - """ - - visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") - """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. - - Note: - This attribute is only used when debug visualization is enabled. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py deleted file mode 100644 index 942ddbd5144..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from dataclasses import dataclass - -import torch - - -@dataclass -class FrameTransformerData: - """Data container for the frame transformer sensor.""" - - target_frame_names: list[str] = None - """Target frame names (this denotes the order in which that frame data is ordered). - - The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. - This does not necessarily follow the order in which the frames are defined in the config due to - the regex matching. - """ - - target_pos_source: torch.Tensor = None - """Position of the target frame(s) relative to source frame. - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_source: torch.Tensor = None - """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - target_pos_w: torch.Tensor = None - """Position of the target frame(s) after offset (in world frame). - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_w: torch.Tensor = None - """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - source_pos_w: torch.Tensor = None - """Position of the source frame after offset (in world frame). 
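As an illustration of how this sensor is typically configured and read, a hypothetical end-effector tracker might look as follows. The import path assumes the classes remain re-exported from isaaclab.sensors, and the prim paths and frame name are placeholders:

    from isaaclab.sensors import FrameTransformerCfg

    ee_tracker_cfg = FrameTransformerCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",                # source frame (placeholder)
        target_frames=[
            FrameTransformerCfg.FrameCfg(
                prim_path="{ENV_REGEX_NS}/Robot/gripper",     # target frame (placeholder)
                name="end_effector",
            ),
        ],
        debug_vis=False,
    )
    # once the sensor is initialized and the simulation stepped:
    #   ee_pos_b  = sensor.data.target_pos_source[:, 0]    # (N, 3) position in the base frame
    #   ee_quat_b = sensor.data.target_quat_source[:, 0]   # (N, 4) orientation (w, x, y, z)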
- - Shape is (N, 3), where N is the number of environments. - """ - - source_quat_w: torch.Tensor = None - """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, 4), where N is the number of environments. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py deleted file mode 100644 index 7501e41cf49..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -""" -Imu Sensor -""" - -from .imu import Imu -from .imu_cfg import ImuCfg -from .imu_data import ImuData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py deleted file mode 100644 index e092b39502e..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py +++ /dev/null @@ -1,299 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch - -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.markers import VisualizationMarkers - -from ..sensor_base import SensorBase -from .imu_data import ImuData - -if TYPE_CHECKING: - from .imu_cfg import ImuCfg - - -class Imu(SensorBase): - """The Inertia Measurement Unit (IMU) sensor. - - The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame - linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular - accelerations/velocities. - - If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. - The fixed transform from that ancestor to the target prim is computed once during initialization and - composed with the configured sensor offset. - - .. note:: - - We are computing the accelerations using numerical differentiation from the velocities. Consequently, the - IMU sensor accuracy depends on the chosen phsyx timestep. For a sufficient accuracy, we recommend to keep the - timestep at least as 200Hz. - - .. note:: - - The user can configure the sensor offset in the configuration file. The offset is applied relative to the - rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform - from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. - The offset is defined as a position vector and a quaternion rotation, which - are applied in the order: position, then rotation. The position is applied as a translation - in the body frame of the rigid source prim, and the rotation is applied as a rotation - in the body frame of the rigid source prim. - - """ - - cfg: ImuCfg - """The configuration parameters.""" - - def __init__(self, cfg: ImuCfg): - """Initializes the Imu sensor. - - Args: - cfg: The configuration parameters. 
- """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data = ImuData() - - # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) - self._rigid_parent_expr: str | None = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Imu sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of sensors : {self._view.count}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> ImuData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_instances(self) -> int: - return self._view.count - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.pos_w[env_ids] = 0.0 - self._data.quat_w[env_ids] = 0.0 - self._data.quat_w[env_ids, 0] = 1.0 - self._data.projected_gravity_b[env_ids] = 0.0 - self._data.projected_gravity_b[env_ids, 2] = -1.0 - self._data.lin_vel_b[env_ids] = 0.0 - self._data.ang_vel_b[env_ids] = 0.0 - self._data.lin_acc_b[env_ids] = 0.0 - self._data.ang_acc_b[env_ids] = 0.0 - self._prev_lin_vel_w[env_ids] = 0.0 - self._prev_ang_vel_w[env_ids] = 0.0 - - def update(self, dt: float, force_recompute: bool = False): - # save timestamp - self._dt = dt - # execute updating - super().update(dt, force_recompute) - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - - If the target prim path is a rigid body, build the view directly on it. - - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor - to the target prim, and build the view on the ancestor expression. 
- """ - # Initialize parent class - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check if the prim at path is a rigid prim - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") - - # Find the first matching ancestor prim that implements rigid body API - ancestor_prim = sim_utils.get_first_matching_ancestor_prim( - prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) - ) - if ancestor_prim is None: - raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") - # Convert ancestor prim path to expression - if ancestor_prim == prim: - self._rigid_parent_expr = self.cfg.prim_path - fixed_pos_b, fixed_quat_b = None, None - else: - # Convert ancestor prim path to expression - relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString - self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") - # Resolve the relative pose between the target prim and the ancestor prim - fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) - - # Create the rigid body view on the ancestor - self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) - - # Get world gravity - gravity = self._physics_sim_view.get_gravity() - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) - - # Create internal buffers - self._initialize_buffers_impl() - - # Compose the configured offset with the fixed ancestor->target transform (done once) - # new_offset = fixed * cfg.offset - # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg - if fixed_pos_b is not None and fixed_quat_b is not None: - # Broadcast fixed transform across instances - fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) - fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) - - cfg_p = self._offset_pos_b.clone() - cfg_q = self._offset_quat_b.clone() - - composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) - composed_q = math_utils.quat_mul(fixed_q, cfg_q) - - self._offset_pos_b = composed_p - self._offset_quat_b = composed_q - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - # world pose of the rigid source (ancestor) from the PhysX view - pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = quat_w.roll(1, dims=-1) - - # sensor pose in world: apply composed offset - self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) - self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) - - # COM of rigid source (body frame) - com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] - - # Velocities at rigid source COM - lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) - - # If sensor offset or COM != link origin, account for angular velocity contribution - lin_vel_w += torch.linalg.cross( - ang_vel_w, math_utils.quat_apply(quat_w, 
self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 - ) - - # numerical derivative (world frame) - lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] - ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt - - # batch rotate world->body using current sensor orientation - dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) - dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( - 5, dim=0 - ) - # store the velocities. - self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] - self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] - # store the accelerations - self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] - self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] - # store projected gravity - self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] - - self._prev_lin_vel_w[env_ids] = lin_vel_w - self._prev_ang_vel_w[env_ids] = ang_vel_w - - def _initialize_buffers_impl(self): - """Create buffers for storing data.""" - # data buffers - self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) - self._data.quat_w[:, 0] = 1.0 - self._data.projected_gravity_b = torch.zeros(self._view.count, 3, device=self._device) - self._data.lin_vel_b = torch.zeros_like(self._data.pos_w) - self._data.ang_vel_b = torch.zeros_like(self._data.pos_w) - self._data.lin_acc_b = torch.zeros_like(self._data.pos_w) - self._data.ang_acc_b = torch.zeros_like(self._data.pos_w) - self._prev_lin_vel_w = torch.zeros_like(self._data.pos_w) - self._prev_ang_vel_w = torch.zeros_like(self._data.pos_w) - - # store sensor offset (applied relative to rigid source). - # This may be composed later with a fixed ancestor->target transform. - self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) - # set gravity bias - self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( - self._view.count, 1 - ) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. 
not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.acceleration_visualizer.set_visibility(True) - else: - if hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self._view is None: - return - # get marker location - # -- base state - base_pos_w = self._data.pos_w.clone() - base_pos_w[:, 2] += 0.5 - # -- resolve the scales - default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale - arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # arrow-direction - quat_opengl = math_utils.quat_from_matrix( - math_utils.create_rotation_matrix_from_view( - self._data.pos_w, - self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), - up_axis=up_axis, - device=self._device, - ) - ) - quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") - # display markers - self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py deleted file mode 100644 index 06aeca5fa95..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_cfg.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from isaaclab.markers import VisualizationMarkersCfg -from isaaclab.markers.config import RED_ARROW_X_MARKER_CFG -from isaaclab.utils import configclass - -from ..sensor_base_cfg import SensorBaseCfg -from .imu import Imu - - -@configclass -class ImuCfg(SensorBaseCfg): - """Configuration for an Inertial Measurement Unit (IMU) sensor.""" - - class_type: type = Imu - - @configclass - class OffsetCfg: - """The offset pose of the sensor's frame from the sensor's parent frame.""" - - pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" - - offset: OffsetCfg = OffsetCfg() - """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" - - visualizer_cfg: VisualizationMarkersCfg = RED_ARROW_X_MARKER_CFG.replace(prim_path="/Visuals/Command/velocity_goal") - """The configuration object for the visualization markers. Defaults to RED_ARROW_X_MARKER_CFG. - - This attribute is only used when debug visualization is enabled. - """ - gravity_bias: tuple[float, float, float] = (0.0, 0.0, 9.81) - """The linear acceleration bias applied to the linear acceleration in the world frame (x,y,z). - - Imu sensors typically output a positive gravity acceleration in opposition to the direction of gravity. This - config parameter allows users to subtract that bias if set to (0.,0.,0.). 
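A hypothetical IMU configuration that subtracts the gravity bias and mounts the sensor slightly above a base link might look as follows. The import path assumes the class remains re-exported from isaaclab.sensors; the prim path and offset values are placeholders:

    from isaaclab.sensors import ImuCfg

    imu_cfg = ImuCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base",              # placeholder rigid-body prim
        offset=ImuCfg.OffsetCfg(pos=(0.0, 0.0, 0.05)),      # 5 cm above the body origin
        gravity_bias=(0.0, 0.0, 0.0),                       # report plain kinematic acceleration
        update_period=0.005,                                # update the sensor at 200 Hz
    )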
By default this is set to (0.0,0.0,9.81) - which results in a positive acceleration reading in the world Z. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py deleted file mode 100644 index dd06e09a8b7..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from dataclasses import dataclass - -import torch - - -@dataclass -class ImuData: - """Data container for the Imu sensor.""" - - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion ``(w, x, y, z)`` in world frame. - - Shape is (N, 4), where ``N`` is the number of environments. - """ - - projected_gravity_b: torch.Tensor = None - """Gravity direction unit vector projected on the imu frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - lin_vel_b: torch.Tensor = None - """IMU frame linear velocity relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - ang_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - lin_acc_b: torch.Tensor = None - """IMU frame linear acceleration relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - ang_acc_b: torch.Tensor = None - """IMU frame angular acceleration relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py deleted file mode 100644 index 06f479ed2ee..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-module for Warp-based ray-cast sensor. - -The sub-module contains two implementations of the ray-cast sensor: - -- :class:`isaaclab.sensors.ray_caster.RayCaster`: A basic ray-cast sensor that can be used to ray-cast against a single mesh. -- :class:`isaaclab.sensors.ray_caster.MultiMeshRayCaster`: A multi-mesh ray-cast sensor that can be used to ray-cast against - multiple meshes. For these meshes, it tracks their transformations and updates the warp meshes accordingly. - -Corresponding camera implementations are also provided for each of the sensor implementations. Internally, they perform -the same ray-casting operations as the sensor implementations, but return the results as images. -""" - -from .
import patterns -from .multi_mesh_ray_caster import MultiMeshRayCaster -from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera -from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg -from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData -from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg -from .multi_mesh_ray_caster_data import MultiMeshRayCasterData -from .ray_caster import RayCaster -from .ray_caster_camera import RayCasterCamera -from .ray_caster_camera_cfg import RayCasterCameraCfg -from .ray_caster_cfg import RayCasterCfg -from .ray_caster_data import RayCasterData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py deleted file mode 100644 index 39be0d7ca0d..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster.py +++ /dev/null @@ -1,427 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -import re -from collections.abc import Sequence -from typing import TYPE_CHECKING, ClassVar - -import numpy as np -import torch -import trimesh -import warp as wp - -import omni.physics.tensors.impl.api as physx - -import isaaclab.sim as sim_utils -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.math import matrix_from_quat, quat_mul -from isaaclab.utils.mesh import PRIMITIVE_MESH_TYPES, create_trimesh_from_geom_mesh, create_trimesh_from_geom_shape -from isaaclab.utils.warp import convert_to_warp_mesh, raycast_dynamic_meshes - -from .multi_mesh_ray_caster_data import MultiMeshRayCasterData -from .ray_cast_utils import obtain_world_pose_from_view -from .ray_caster import RayCaster - -if TYPE_CHECKING: - from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg - -# import logger -logger = logging.getLogger(__name__) - - -class MultiMeshRayCaster(RayCaster): - """A multi-mesh ray-casting sensor. - - The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are - defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against - a set of meshes with a given ray pattern. - - The meshes are parsed from the list of primitive paths provided in the configuration. These are then - converted to warp meshes and stored in the :attr:`meshes` list. The ray-caster then ray-casts against - these warp meshes using the ray pattern provided in the configuration. - - Compared to the default RayCaster, the MultiMeshRayCaster provides additional functionality and flexibility as - an extension of the default RayCaster with the following enhancements: - - - Raycasting against multiple target types : Supports primitive shapes (spheres, cubes, etc.) as well as arbitrary - meshes. - - Dynamic mesh tracking : Keeps track of specified meshes, enabling raycasting against moving parts - (e.g., robot links, articulated bodies, or dynamic obstacles). - - Memory-efficient caching : Avoids redundant memory usage by reusing mesh data across environments. - - Example usage to raycast against the visual meshes of a robot (e.g. ANYmal): - - .. 
code-block:: python - - ray_caster_cfg = MultiMeshRayCasterCfg( - prim_path="{ENV_REGEX_NS}/Robot", - mesh_prim_paths=[ - "/World/Ground", - MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/LF_.*/visuals"), - MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/RF_.*/visuals"), - MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/LH_.*/visuals"), - MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/RH_.*/visuals"), - MultiMeshRayCasterCfg.RaycastTargetCfg(prim_expr="{ENV_REGEX_NS}/Robot/base/visuals"), - ], - ray_alignment="world", - pattern_cfg=patterns.GridPatternCfg(resolution=0.02, size=(2.5, 2.5), direction=(0, 0, -1)), - ) - - """ - - cfg: MultiMeshRayCasterCfg - """The configuration parameters.""" - - mesh_offsets: dict[str, tuple[torch.Tensor, torch.Tensor]] = {} - - mesh_views: ClassVar[dict[str, XformPrimView | physx.ArticulationView | physx.RigidBodyView]] = {} - """A dictionary to store mesh views for raycasting, shared across all instances. - - The keys correspond to the prim path for the mesh views, and values are the corresponding view objects. - """ - - def __init__(self, cfg: MultiMeshRayCasterCfg): - """Initializes the ray-caster object. - - Args: - cfg: The configuration parameters. - """ - # Initialize base class - super().__init__(cfg) - - # Create empty variables for storing output data - self._num_meshes_per_env: dict[str, int] = {} - """Keeps track of the number of meshes per env for each ray_cast target. - Since we allow regex indexing (e.g. env_*/object_*) they can differ - """ - - self._raycast_targets_cfg: list[MultiMeshRayCasterCfg.RaycastTargetCfg] = [] - for target in self.cfg.mesh_prim_paths: - # Legacy support for string targets. Treat them as global targets. - if isinstance(target, str): - self._raycast_targets_cfg.append(cfg.RaycastTargetCfg(prim_expr=target, track_mesh_transforms=False)) - else: - self._raycast_targets_cfg.append(target) - - # Resolve regex namespace if set - for cfg in self._raycast_targets_cfg: - cfg.prim_expr = cfg.prim_expr.format(ENV_REGEX_NS="/World/envs/env_.*") - - # overwrite the data class - self._data = MultiMeshRayCasterData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - - return ( - f"Ray-caster @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of meshes : {self._num_envs} x {sum(self._num_meshes_per_env.values())} \n" - f"\tnumber of sensors : {self._view.count}\n" - f"\tnumber of rays/sensor: {self.num_rays}\n" - f"\ttotal number of rays : {self.num_rays * self._view.count}" - ) - - """ - Properties - """ - - @property - def data(self) -> MultiMeshRayCasterData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - """ - Implementation. - """ - - def _initialize_warp_meshes(self): - """Parse mesh prim expressions, build (or reuse) Warp meshes, and cache per-env mesh IDs. - - High-level steps (per target expression): - - 1. Resolve matching prims by regex/path expression. - 2. Collect supported mesh child prims; merge into a single mesh if configured. - 3. Deduplicate identical vertex buffers (exact match) to avoid uploading duplicates to Warp. - 4. Partition mesh IDs per environment or mark as globally shared. - 5. Optionally create physics views (articulation / rigid body / fallback XForm) and cache local offsets. 
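A minimal sketch of the bookkeeping behind steps 3-4, assuming two meshes per environment and four environments (all values are illustrative; the real implementation below additionally deduplicates identical vertex buffers and handles globally shared prims):

.. code-block:: python

    num_envs = 4
    wp_mesh_ids = [101, 102, 103, 104, 105, 106, 107, 108]  # one warp mesh id per parsed prim, env-major order
    n_meshes_per_env = len(wp_mesh_ids) // num_envs
    per_env_ids = [
        wp_mesh_ids[i * n_meshes_per_env : (i + 1) * n_meshes_per_env]
        for i in range(num_envs)
    ]
    # per_env_ids -> [[101, 102], [103, 104], [105, 106], [107, 108]]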
- - Exceptions: - Raises a RuntimeError if: - - - No prims match the provided expression. - - No supported mesh prims are found under a matched prim. - - Multiple mesh prims are found but merging is disabled. - - """ - multi_mesh_ids: dict[str, list[list[int]]] = {} - for target_cfg in self._raycast_targets_cfg: - # target prim path to ray cast against - target_prim_path = target_cfg.prim_expr - # # check if mesh already casted into warp mesh and skip if so. - if target_prim_path in multi_mesh_ids: - logger.warning( - f"Mesh at target prim path '{target_prim_path}' already exists in the mesh cache. Duplicate entries" - " in `mesh_prim_paths`? This mesh will be skipped." - ) - continue - - # find all matching prim paths to provided expression of the target - target_prims = sim_utils.find_matching_prims(target_prim_path) - if len(target_prims) == 0: - raise RuntimeError(f"Failed to find a prim at path expression: {target_prim_path}") - - # If only one prim is found, treat it as a global prim. - # Either it's a single global object (e.g. ground) or we are only using one env. - is_global_prim = len(target_prims) == 1 - - loaded_vertices: list[np.ndarray | None] = [] - wp_mesh_ids = [] - - for target_prim in target_prims: - # Reuse previously parsed shared mesh instance if possible. - if target_cfg.is_shared and len(wp_mesh_ids) > 0: - # Verify if this mesh has already been registered in an earlier environment. - # Note, this check may fail, if the prim path is not following the env_.* pattern - # Which (worst case) leads to parsing the mesh and skipping registering it at a later stage - curr_prim_base_path = re.sub(r"env_\d+", "env_0", str(target_prim.GetPath())) # - if curr_prim_base_path in MultiMeshRayCaster.meshes: - MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = MultiMeshRayCaster.meshes[ - curr_prim_base_path - ] - # Reuse mesh imported by another ray-cast sensor (global cache). - if str(target_prim.GetPath()) in MultiMeshRayCaster.meshes: - wp_mesh_ids.append(MultiMeshRayCaster.meshes[str(target_prim.GetPath())].id) - loaded_vertices.append(None) - continue - - mesh_prims = sim_utils.get_all_matching_child_prims( - target_prim.GetPath(), lambda prim: prim.GetTypeName() in PRIMITIVE_MESH_TYPES + ["Mesh"] - ) - if len(mesh_prims) == 0: - warn_msg = ( - f"No mesh prims found at path: {target_prim.GetPath()} with supported types:" - f" {PRIMITIVE_MESH_TYPES + ['Mesh']}" - " Skipping this target." 
- ) - for prim in sim_utils.get_all_matching_child_prims(target_prim.GetPath(), lambda prim: True): - warn_msg += f"\n - Available prim '{prim.GetPath()}' of type '{prim.GetTypeName()}'" - logger.warning(warn_msg) - continue - - trimesh_meshes = [] - - for mesh_prim in mesh_prims: - # check if valid - if mesh_prim is None or not mesh_prim.IsValid(): - raise RuntimeError(f"Invalid mesh prim path: {target_prim}") - - if mesh_prim.GetTypeName() == "Mesh": - mesh = create_trimesh_from_geom_mesh(mesh_prim) - else: - mesh = create_trimesh_from_geom_shape(mesh_prim) - scale = sim_utils.resolve_prim_scale(mesh_prim) - mesh.apply_scale(scale) - - relative_pos, relative_quat = sim_utils.resolve_prim_pose(mesh_prim, target_prim) - relative_pos = torch.tensor(relative_pos, dtype=torch.float32) - relative_quat = torch.tensor(relative_quat, dtype=torch.float32) - - rotation = matrix_from_quat(relative_quat) - transform = np.eye(4) - transform[:3, :3] = rotation.numpy() - transform[:3, 3] = relative_pos.numpy() - mesh.apply_transform(transform) - - # add to list of parsed meshes - trimesh_meshes.append(mesh) - - if len(trimesh_meshes) == 1: - trimesh_mesh = trimesh_meshes[0] - elif target_cfg.merge_prim_meshes: - # combine all trimesh meshes into a single mesh - trimesh_mesh = trimesh.util.concatenate(trimesh_meshes) - else: - raise RuntimeError( - f"Multiple mesh prims found at path: {target_prim.GetPath()} but merging is disabled. Please" - " enable `merge_prim_meshes` in the configuration or specify each mesh separately." - ) - - # check if the mesh is already registered, if so only reference the mesh - registered_idx = _registered_points_idx(trimesh_mesh.vertices, loaded_vertices) - if registered_idx != -1 and self.cfg.reference_meshes: - logger.info("Found a duplicate mesh, only reference the mesh.") - # Found a duplicate mesh, only reference the mesh. - loaded_vertices.append(None) - wp_mesh_ids.append(wp_mesh_ids[registered_idx]) - else: - loaded_vertices.append(trimesh_mesh.vertices) - wp_mesh = convert_to_warp_mesh(trimesh_mesh.vertices, trimesh_mesh.faces, device=self.device) - MultiMeshRayCaster.meshes[str(target_prim.GetPath())] = wp_mesh - wp_mesh_ids.append(wp_mesh.id) - - # print info - if registered_idx != -1: - logger.info(f"Found duplicate mesh for mesh prims under path '{target_prim.GetPath()}'.") - else: - logger.info( - f"Read '{len(mesh_prims)}' mesh prims under path '{target_prim.GetPath()}' with" - f" {len(trimesh_mesh.vertices)} vertices and {len(trimesh_mesh.faces)} faces." - ) - - if is_global_prim: - # reference the mesh for each environment to ray cast against - multi_mesh_ids[target_prim_path] = [wp_mesh_ids] * self._num_envs - self._num_meshes_per_env[target_prim_path] = len(wp_mesh_ids) - else: - # split up the meshes for each environment. Little bit ugly, since - # the current order is interleaved (env1_obj1, env1_obj2, env2_obj1, env2_obj2, ...) 
- multi_mesh_ids[target_prim_path] = [] - mesh_idx = 0 - n_meshes_per_env = len(wp_mesh_ids) // self._num_envs - self._num_meshes_per_env[target_prim_path] = n_meshes_per_env - for _ in range(self._num_envs): - multi_mesh_ids[target_prim_path].append(wp_mesh_ids[mesh_idx : mesh_idx + n_meshes_per_env]) - mesh_idx += n_meshes_per_env - - if target_cfg.track_mesh_transforms: - MultiMeshRayCaster.mesh_views[target_prim_path], MultiMeshRayCaster.mesh_offsets[target_prim_path] = ( - self._obtain_trackable_prim_view(target_prim_path) - ) - - # throw an error if no meshes are found - if all([target_cfg.prim_expr not in multi_mesh_ids for target_cfg in self._raycast_targets_cfg]): - raise RuntimeError( - f"No meshes found for ray-casting! Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" - ) - - total_n_meshes_per_env = sum(self._num_meshes_per_env.values()) - self._mesh_positions_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 3, device=self.device) - self._mesh_orientations_w = torch.zeros(self._num_envs, total_n_meshes_per_env, 4, device=self.device) - - # Update the mesh positions and rotations - mesh_idx = 0 - for target_cfg in self._raycast_targets_cfg: - n_meshes = self._num_meshes_per_env[target_cfg.prim_expr] - - # update position of the target meshes - pos_w, ori_w = [], [] - for prim in sim_utils.find_matching_prims(target_cfg.prim_expr): - translation, quat = sim_utils.resolve_prim_pose(prim) - pos_w.append(translation) - ori_w.append(quat) - pos_w = torch.tensor(pos_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 3) - ori_w = torch.tensor(ori_w, device=self.device, dtype=torch.float32).view(-1, n_meshes, 4) - - self._mesh_positions_w[:, mesh_idx : mesh_idx + n_meshes] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + n_meshes] = ori_w - mesh_idx += n_meshes - - # flatten the list of meshes that are included in mesh_prim_paths of the specific ray caster - multi_mesh_ids_flattened = [] - for env_idx in range(self._num_envs): - meshes_in_env = [] - for target_cfg in self._raycast_targets_cfg: - meshes_in_env.extend(multi_mesh_ids[target_cfg.prim_expr][env_idx]) - multi_mesh_ids_flattened.append(meshes_in_env) - - self._mesh_views = [ - self.mesh_views[target_cfg.prim_expr] if target_cfg.track_mesh_transforms else None - for target_cfg in self._raycast_targets_cfg - ] - - # save a warp array with mesh ids that is passed to the raycast function - self._mesh_ids_wp = wp.array2d(multi_mesh_ids_flattened, dtype=wp.uint64, device=self.device) - - def _initialize_rays_impl(self): - super()._initialize_rays_impl() - if self.cfg.update_mesh_ids: - self._data.ray_mesh_ids = torch.zeros( - self._num_envs, self.num_rays, 1, device=self.device, dtype=torch.int16 - ) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data. - - Args: - env_ids: The environment ids to update. 
- """ - - self._update_ray_infos(env_ids) - - # Update the mesh positions and rotations - mesh_idx = 0 - for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): - if not target_cfg.track_mesh_transforms: - mesh_idx += self._num_meshes_per_env[target_cfg.prim_expr] - continue - - # update position of the target meshes - pos_w, ori_w = obtain_world_pose_from_view(view, None) - pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w - ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w - - if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: - pos_offset, ori_offset = MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] - pos_w -= pos_offset - ori_w = quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) - - count = view.count - if count != 1: # Mesh is not global, i.e. we have different meshes for each env - count = count // self._num_envs - pos_w = pos_w.view(self._num_envs, count, 3) - ori_w = ori_w.view(self._num_envs, count, 4) - - self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w - mesh_idx += count - - self._data.ray_hits_w[env_ids], _, _, _, mesh_ids = raycast_dynamic_meshes( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env - max_dist=self.cfg.max_distance, - mesh_positions_w=self._mesh_positions_w[env_ids], - mesh_orientations_w=self._mesh_orientations_w[env_ids], - return_mesh_id=self.cfg.update_mesh_ids, - ) - - if self.cfg.update_mesh_ids: - self._data.ray_mesh_ids[env_ids] = mesh_ids - - def __del__(self): - super().__del__() - if RayCaster._instance_count == 0: - MultiMeshRayCaster.mesh_offsets.clear() - MultiMeshRayCaster.mesh_views.clear() - - -""" -Helper functions -""" - - -def _registered_points_idx(points: np.ndarray, registered_points: list[np.ndarray | None]) -> int: - """Check if the points are already registered in the list of registered points. - - Args: - points: The points to check. - registered_points: The list of registered points. - - Returns: - The index of the registered points if found, otherwise -1. - """ - for idx, reg_points in enumerate(registered_points): - if reg_points is None: - continue - if reg_points.shape == points.shape and (reg_points == points).all(): - return idx - return -1 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py deleted file mode 100644 index 970860fa50a..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch - -import isaaclab.utils.math as math_utils -from isaaclab.utils.warp import raycast_dynamic_meshes - -from .multi_mesh_ray_caster import MultiMeshRayCaster -from .multi_mesh_ray_caster_camera_data import MultiMeshRayCasterCameraData -from .ray_cast_utils import obtain_world_pose_from_view -from .ray_caster_camera import RayCasterCamera - -if TYPE_CHECKING: - from .multi_mesh_ray_caster_camera_cfg import MultiMeshRayCasterCameraCfg - - -class MultiMeshRayCasterCamera(RayCasterCamera, MultiMeshRayCaster): - """A multi-mesh ray-casting camera sensor. - - The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are - defined in the sensor's local coordinate frame. The sensor has the same interface as the - :class:`isaaclab.sensors.Camera` that implements the camera class through USD camera prims. - However, this class provides a faster image generation. The sensor converts meshes from the list of - primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these - Warp meshes only. - - Currently, only the following annotators are supported: - - - ``"distance_to_camera"``: An image containing the distance to camera optical center. - - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - - ``"normals"``: An image containing the local surface normal vectors at each pixel. - """ - - cfg: MultiMeshRayCasterCameraCfg - """The configuration parameters.""" - - def __init__(self, cfg: MultiMeshRayCasterCameraCfg): - """Initializes the camera object. - - Args: - cfg: The configuration parameters. - - Raises: - ValueError: If the provided data types are not supported by the ray-caster camera. - """ - self._check_supported_data_types(cfg) - # initialize base class - MultiMeshRayCaster.__init__(self, cfg) - # create empty variables for storing output data - self._data = MultiMeshRayCasterCameraData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Multi-Mesh Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of meshes : {len(MultiMeshRayCaster.meshes)}\n" - f"\tnumber of sensors : {self._view.count}\n" - f"\tnumber of rays/sensor: {self.num_rays}\n" - f"\ttotal number of rays : {self.num_rays * self._view.count}\n" - f"\timage shape : {self.image_shape}" - ) - - """ - Implementation. 
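A rough configuration sketch, assuming the usual sensor-config fields (``prim_path`` from the sensor base config, ``data_types`` and ``pattern_cfg`` from ``RayCasterCameraCfg``, ``mesh_prim_paths`` from ``MultiMeshRayCasterCfg``); the prim paths and image size below are illustrative only:

.. code-block:: python

    camera_cfg = MultiMeshRayCasterCameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/base/front_cam",
        mesh_prim_paths=["/World/ground"],
        data_types=["distance_to_image_plane", "normals"],
        pattern_cfg=patterns.PinholeCameraPatternCfg(width=320, height=240),
    )
    camera = MultiMeshRayCasterCamera(cfg=camera_cfg)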
- """ - - def _initialize_warp_meshes(self): - MultiMeshRayCaster._initialize_warp_meshes(self) - - def _create_buffers(self): - super()._create_buffers() - self._data.image_mesh_ids = torch.zeros( - self._num_envs, *self.image_shape, 1, device=self.device, dtype=torch.int16 - ) - - def _initialize_rays_impl(self): - # Create all indices buffer - self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer - self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - # create buffers - self._create_buffers() - # compute intrinsic matrices - self._compute_intrinsic_matrices() - # compute ray stars and directions - self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( - self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device - ) - self.num_rays = self.ray_directions.shape[1] - # create buffer to store ray hits - self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) - # set offsets - quat_w = math_utils.convert_camera_frame_orientation_convention( - torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" - ) - self._offset_quat = quat_w.repeat(self._view.count, 1) - self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) - - self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - - def _update_ray_infos(self, env_ids: Sequence[int]): - """Updates the ray information buffers.""" - - # compute poses from current view - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] - ) - # update the data - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w - self._data.quat_w_ros[env_ids] = quat_w - - # note: full orientation is considered - ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) - - self._ray_starts_w[env_ids] = ray_starts_w - self._ray_directions_w[env_ids] = ray_directions_w - - def _update_buffers_impl(self, env_ids: Sequence[int] | torch.Tensor | None): - """Fills the buffers of the sensor data.""" - self._update_ray_infos(env_ids) - - # increment frame count - if env_ids is None: - env_ids = torch.arange(self._num_envs, device=self.device) - elif not isinstance(env_ids, torch.Tensor): - env_ids = torch.tensor(env_ids, device=self.device) - - self._frame[env_ids] += 1 - - # Update the mesh positions and rotations - mesh_idx = 0 - for view, target_cfg in zip(self._mesh_views, self._raycast_targets_cfg): - if not target_cfg.track_mesh_transforms: - mesh_idx += self._num_meshes_per_env[target_cfg.prim_expr] - continue - - # update position of the target meshes - pos_w, ori_w = obtain_world_pose_from_view(view, None) - pos_w = pos_w.squeeze(0) if len(pos_w.shape) == 3 else pos_w - ori_w = ori_w.squeeze(0) if len(ori_w.shape) == 3 else ori_w - - if target_cfg.prim_expr in MultiMeshRayCaster.mesh_offsets: - pos_offset, ori_offset = 
MultiMeshRayCaster.mesh_offsets[target_cfg.prim_expr] - pos_w -= pos_offset - ori_w = math_utils.quat_mul(ori_offset.expand(ori_w.shape[0], -1), ori_w) - - count = view.count - if count != 1: # Mesh is not global, i.e. we have different meshes for each env - count = count // self._num_envs - pos_w = pos_w.view(self._num_envs, count, 3) - ori_w = ori_w.view(self._num_envs, count, 4) - - self._mesh_positions_w[:, mesh_idx : mesh_idx + count] = pos_w - self._mesh_orientations_w[:, mesh_idx : mesh_idx + count] = ori_w - mesh_idx += count - - # ray cast and store the hits - self.ray_hits_w[env_ids], ray_depth, ray_normal, _, ray_mesh_ids = raycast_dynamic_meshes( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - mesh_ids_wp=self._mesh_ids_wp, # list with shape num_envs x num_meshes_per_env - max_dist=self.cfg.max_distance, - mesh_positions_w=self._mesh_positions_w[env_ids], - mesh_orientations_w=self._mesh_orientations_w[env_ids], - return_distance=any( - [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] - ), - return_normal="normals" in self.cfg.data_types, - return_mesh_id=self.cfg.update_mesh_ids, - ) - - # update output buffers - if "distance_to_image_plane" in self.cfg.data_types: - # note: data is in camera frame so we only take the first component (z-axis of camera frame) - distance_to_image_plane = ( - math_utils.quat_apply( - math_utils.quat_inv(self._data.quat_w_world[env_ids]).repeat(1, self.num_rays), - (ray_depth[:, :, None] * self._ray_directions_w[env_ids]), - ) - )[:, :, 0] - # apply the maximum distance after the transformation - if self.cfg.depth_clipping_behavior == "max": - distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance - elif self.cfg.depth_clipping_behavior == "zero": - distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 - self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( - -1, *self.image_shape, 1 - ) - - if "distance_to_camera" in self.cfg.data_types: - if self.cfg.depth_clipping_behavior == "max": - ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) - elif self.cfg.depth_clipping_behavior == "zero": - ray_depth[ray_depth > self.cfg.max_distance] = 0.0 - self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) - - if "normals" in self.cfg.data_types: - self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) - - if self.cfg.update_mesh_ids: - self._data.image_mesh_ids[env_ids] = ray_mesh_ids.view(-1, *self.image_shape, 1) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py deleted file mode 100644 index 45df51ce6d8..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_cfg.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -"""Configuration for the ray-cast camera sensor.""" - -import logging - -from isaaclab.utils import configclass - -from .multi_mesh_ray_caster_camera import MultiMeshRayCasterCamera -from .multi_mesh_ray_caster_cfg import MultiMeshRayCasterCfg -from .ray_caster_camera_cfg import RayCasterCameraCfg - -# import logger -logger = logging.getLogger(__name__) - - -@configclass -class MultiMeshRayCasterCameraCfg(RayCasterCameraCfg, MultiMeshRayCasterCfg): - """Configuration for the multi-mesh ray-cast camera sensor.""" - - class_type: type = MultiMeshRayCasterCamera - - def __post_init__(self): - super().__post_init__() - - # Camera only supports 'base' ray alignment. Ensure this is set correctly. - if self.ray_alignment != "base": - logger.warning( - "Ray alignment for MultiMeshRayCasterCameraCfg only supports 'base' alignment. Overriding from" - f"'{self.ray_alignment}' to 'base'." - ) - self.ray_alignment = "base" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py deleted file mode 100644 index d2f26abdbf4..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_camera_data.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Data container for the multi-mesh ray-cast camera sensor.""" - -import torch - -from isaaclab.sensors.camera import CameraData - -from .ray_caster_data import RayCasterData - - -class MultiMeshRayCasterCameraData(CameraData, RayCasterData): - """Data container for the multi-mesh ray-cast sensor.""" - - image_mesh_ids: torch.Tensor = None - """The mesh ids of the image pixels. - - Shape is (N, H, W, 1), where N is the number of sensors, H and W are the height and width of the image, - and 1 is the number of mesh ids per pixel. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py deleted file mode 100644 index f5393920162..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_cfg.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - - -"""Configuration for the ray-cast sensor.""" - -from dataclasses import MISSING - -from isaaclab.utils import configclass - -from .multi_mesh_ray_caster import MultiMeshRayCaster -from .ray_caster_cfg import RayCasterCfg - - -@configclass -class MultiMeshRayCasterCfg(RayCasterCfg): - """Configuration for the multi-mesh ray-cast sensor.""" - - @configclass - class RaycastTargetCfg: - """Configuration for different ray-cast targets.""" - - prim_expr: str = MISSING - """The regex to specify the target prim to ray cast against.""" - - is_shared: bool = False - """Whether the target prim is assumed to be the same mesh across all environments. Defaults to False. - - If True, only the first mesh is read and then reused for all environments, rather than re-parsed. - This provides a startup performance boost when there are many environments that all use the same asset. - - .. 
note:: - If :attr:`MultiMeshRayCasterCfg.reference_meshes` is False, this flag has no effect. - """ - - merge_prim_meshes: bool = True - """Whether to merge the parsed meshes for a prim that contains multiple meshes. Defaults to True. - - This will create a new mesh that combines all meshes in the parsed prim. The raycast hits mesh IDs - will then refer to the single merged mesh. - """ - - track_mesh_transforms: bool = True - """Whether the mesh transformations should be tracked. Defaults to True. - - .. note:: - Not tracking the mesh transformations is recommended when the meshes are static to increase performance. - """ - - class_type: type = MultiMeshRayCaster - - mesh_prim_paths: list[str | RaycastTargetCfg] = MISSING - """The list of mesh primitive paths to ray cast against. - - If an entry is a string, it is internally converted to :class:`RaycastTargetCfg` with - :attr:`~RaycastTargetCfg.track_mesh_transforms` disabled. These settings ensure backwards compatibility - with the default raycaster. - """ - - update_mesh_ids: bool = False - """Whether to update the mesh ids of the ray hits in the :attr:`data` container.""" - - reference_meshes: bool = True - """Whether to reference duplicated meshes instead of loading each one separately into memory. - Defaults to True. - - When enabled, the raycaster parses all meshes in all environments, but reuses references - for duplicates instead of storing multiple copies. This reduces memory footprint. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py deleted file mode 100644 index b9ae187591b..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/multi_mesh_ray_caster_data.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - - -"""Data container for the multi-mesh ray-cast sensor.""" - -import torch - -from .ray_caster_data import RayCasterData - - -class MultiMeshRayCasterData(RayCasterData): - """Data container for the multi-mesh ray-cast sensor.""" - - ray_mesh_ids: torch.Tensor = None - """The mesh ids of the ray hits. - - Shape is (N, B, 1), where N is the number of sensors, B is the number of rays - in the scan pattern per sensor. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py deleted file mode 100644 index d43f5437ce0..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -"""Sub-module for ray-casting patterns used by the ray-caster.""" - -from .patterns import bpearl_pattern, grid_pattern, lidar_pattern, pinhole_camera_pattern -from .patterns_cfg import BpearlPatternCfg, GridPatternCfg, LidarPatternCfg, PatternBaseCfg, PinholeCameraPatternCfg diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py deleted file mode 100644 index d5255f64c75..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import math -from typing import TYPE_CHECKING - -import torch - -if TYPE_CHECKING: - from . import patterns_cfg - - -def grid_pattern(cfg: patterns_cfg.GridPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: - """A regular grid pattern for ray casting. - - The grid pattern is made from rays that are parallel to each other. They span a 2D grid in the sensor's - local coordinates from ``(-length/2, -width/2)`` to ``(length/2, width/2)``, which is defined - by the ``size = (length, width)`` and ``resolution`` parameters in the config. - - Args: - cfg: The configuration instance for the pattern. - device: The device to create the pattern on. - - Returns: - The starting positions and directions of the rays. - - Raises: - ValueError: If the ordering is not "xy" or "yx". - ValueError: If the resolution is less than or equal to 0. - """ - # check valid arguments - if cfg.ordering not in ["xy", "yx"]: - raise ValueError(f"Ordering must be 'xy' or 'yx'. Received: '{cfg.ordering}'.") - if cfg.resolution <= 0: - raise ValueError(f"Resolution must be greater than 0. Received: '{cfg.resolution}'.") - - # resolve mesh grid indexing (note: torch meshgrid is different from numpy meshgrid) - # check: https://github.com/pytorch/pytorch/issues/15301 - indexing = cfg.ordering if cfg.ordering == "xy" else "ij" - # define grid pattern - x = torch.arange(start=-cfg.size[0] / 2, end=cfg.size[0] / 2 + 1.0e-9, step=cfg.resolution, device=device) - y = torch.arange(start=-cfg.size[1] / 2, end=cfg.size[1] / 2 + 1.0e-9, step=cfg.resolution, device=device) - grid_x, grid_y = torch.meshgrid(x, y, indexing=indexing) - - # store into ray starts - num_rays = grid_x.numel() - ray_starts = torch.zeros(num_rays, 3, device=device) - ray_starts[:, 0] = grid_x.flatten() - ray_starts[:, 1] = grid_y.flatten() - - # define ray-cast directions - ray_directions = torch.zeros_like(ray_starts) - ray_directions[..., :] = torch.tensor(list(cfg.direction), device=device) - - return ray_starts, ray_directions - - -def pinhole_camera_pattern( - cfg: patterns_cfg.PinholeCameraPatternCfg, intrinsic_matrices: torch.Tensor, device: str -) -> tuple[torch.Tensor, torch.Tensor]: - """The image pattern for ray casting. - - .. caution:: - This function does not follow the standard pattern interface. It requires the intrinsic matrices - of the cameras to be passed in. This is because we want to be able to randomize the intrinsic - matrices of the cameras, which is not possible with the standard pattern interface. - - Args: - cfg: The configuration instance for the pattern. - intrinsic_matrices: The intrinsic matrices of the cameras. Shape is (N, 3, 3). 
- device: The device to create the pattern on. - - Returns: - The starting positions and directions of the rays. The shape of the tensors are - (N, H * W, 3) and (N, H * W, 3) respectively. - """ - # get image plane mesh grid - grid = torch.meshgrid( - torch.arange(start=0, end=cfg.width, dtype=torch.int32, device=device), - torch.arange(start=0, end=cfg.height, dtype=torch.int32, device=device), - indexing="xy", - ) - pixels = torch.vstack(list(map(torch.ravel, grid))).T - # convert to homogeneous coordinate system - pixels = torch.hstack([pixels, torch.ones((len(pixels), 1), device=device)]) - # move each pixel coordinate to the center of the pixel - pixels += torch.tensor([[0.5, 0.5, 0]], device=device) - # get pixel coordinates in camera frame - pix_in_cam_frame = torch.matmul(torch.inverse(intrinsic_matrices), pixels.T) - - # robotics camera frame is (x forward, y left, z up) from camera frame with (x right, y down, z forward) - # transform to robotics camera frame - transform_vec = torch.tensor([1, -1, -1], device=device).unsqueeze(0).unsqueeze(2) - pix_in_cam_frame = pix_in_cam_frame[:, [2, 0, 1], :] * transform_vec - # normalize ray directions - ray_directions = (pix_in_cam_frame / torch.norm(pix_in_cam_frame, dim=1, keepdim=True)).permute(0, 2, 1) - # for camera, we always ray-cast from the sensor's origin - ray_starts = torch.zeros_like(ray_directions, device=device) - - return ray_starts, ray_directions - - -def bpearl_pattern(cfg: patterns_cfg.BpearlPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: - """The RS-Bpearl pattern for ray casting. - - The `Robosense RS-Bpearl`_ is a short-range LiDAR that has a 360 degrees x 90 degrees super wide - field of view. It is designed for near-field blind-spots detection. - - .. _Robosense RS-Bpearl: https://www.roscomponents.com/product/rs-bpearl/ - - Args: - cfg: The configuration instance for the pattern. - device: The device to create the pattern on. - - Returns: - The starting positions and directions of the rays. - """ - h = torch.arange(-cfg.horizontal_fov / 2, cfg.horizontal_fov / 2, cfg.horizontal_res, device=device) - v = torch.tensor(list(cfg.vertical_ray_angles), device=device) - - pitch, yaw = torch.meshgrid(v, h, indexing="xy") - pitch, yaw = torch.deg2rad(pitch.reshape(-1)), torch.deg2rad(yaw.reshape(-1)) - pitch += torch.pi / 2 - x = torch.sin(pitch) * torch.cos(yaw) - y = torch.sin(pitch) * torch.sin(yaw) - z = torch.cos(pitch) - - ray_directions = -torch.stack([x, y, z], dim=1) - ray_starts = torch.zeros_like(ray_directions) - return ray_starts, ray_directions - - -def lidar_pattern(cfg: patterns_cfg.LidarPatternCfg, device: str) -> tuple[torch.Tensor, torch.Tensor]: - """Lidar sensor pattern for ray casting. - - Args: - cfg: The configuration instance for the pattern. - device: The device to create the pattern on. - - Returns: - The starting positions and directions of the rays. 
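A small usage sketch, assuming the ``LidarPatternCfg`` defined in ``patterns_cfg.py`` further below; the field values are arbitrary:

.. code-block:: python

    cfg = LidarPatternCfg(
        channels=16,
        vertical_fov_range=(-15.0, 15.0),
        horizontal_fov_range=(0.0, 360.0),
        horizontal_res=0.5,
    )
    ray_starts, ray_directions = lidar_pattern(cfg, device="cpu")
    # both tensors have shape (num_rays, 3); ray_starts is all zeros because every ray
    # originates at the sensor origin, and each direction is a unit vector on the scan sphere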
- """ - # Vertical angles - vertical_angles = torch.linspace(cfg.vertical_fov_range[0], cfg.vertical_fov_range[1], cfg.channels) - - # If the horizontal field of view is 360 degrees, exclude the last point to avoid overlap - if abs(abs(cfg.horizontal_fov_range[0] - cfg.horizontal_fov_range[1]) - 360.0) < 1e-6: - up_to = -1 - else: - up_to = None - - # Horizontal angles - num_horizontal_angles = math.ceil((cfg.horizontal_fov_range[1] - cfg.horizontal_fov_range[0]) / cfg.horizontal_res) - horizontal_angles = torch.linspace(cfg.horizontal_fov_range[0], cfg.horizontal_fov_range[1], num_horizontal_angles)[ - :up_to - ] - - # Convert degrees to radians - vertical_angles_rad = torch.deg2rad(vertical_angles) - horizontal_angles_rad = torch.deg2rad(horizontal_angles) - - # Meshgrid to create a 2D array of angles - v_angles, h_angles = torch.meshgrid(vertical_angles_rad, horizontal_angles_rad, indexing="ij") - - # Spherical to Cartesian conversion (assuming Z is up) - x = torch.cos(v_angles) * torch.cos(h_angles) - y = torch.cos(v_angles) * torch.sin(h_angles) - z = torch.sin(v_angles) - - # Ray directions - ray_directions = torch.stack([x, y, z], dim=-1).reshape(-1, 3).to(device) - - # Ray starts: Assuming all rays originate from (0,0,0) - ray_starts = torch.zeros_like(ray_directions).to(device) - - return ray_starts, ray_directions diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py deleted file mode 100644 index f50ba272b70..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/patterns/patterns_cfg.py +++ /dev/null @@ -1,219 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Configuration for the ray-cast sensor.""" - -from __future__ import annotations - -from collections.abc import Callable, Sequence -from dataclasses import MISSING -from typing import Literal - -import torch - -from isaaclab.utils import configclass - -from . import patterns - - -@configclass -class PatternBaseCfg: - """Base configuration for a pattern.""" - - func: Callable[[PatternBaseCfg, str], tuple[torch.Tensor, torch.Tensor]] = MISSING - """Function to generate the pattern. - - The function should take in the configuration and the device name as arguments. It should return - the pattern's starting positions and directions as a tuple of torch.Tensor. - """ - - -@configclass -class GridPatternCfg(PatternBaseCfg): - """Configuration for the grid pattern for ray-casting. - - Defines a 2D grid of rays in the coordinates of the sensor. - - .. attention:: - The points are ordered based on the :attr:`ordering` attribute. - - """ - - func: Callable = patterns.grid_pattern - - resolution: float = MISSING - """Grid resolution (in meters).""" - - size: tuple[float, float] = MISSING - """Grid size (length, width) (in meters).""" - - direction: tuple[float, float, float] = (0.0, 0.0, -1.0) - """Ray direction. Defaults to (0.0, 0.0, -1.0).""" - - ordering: Literal["xy", "yx"] = "xy" - """Specifies the ordering of points in the generated grid. Defaults to ``"xy"``. - - Consider a grid pattern with points at :math:`(x, y)` where :math:`x` and :math:`y` are the grid indices. - The ordering of the points can be specified as "xy" or "yx". This determines the inner and outer loop order - when iterating over the grid points. 
- - * If "xy" is selected, the points are ordered with inner loop over "x" and outer loop over "y". - * If "yx" is selected, the points are ordered with inner loop over "y" and outer loop over "x". - - For example, the grid pattern points with :math:`X = (0, 1, 2)` and :math:`Y = (3, 4)`: - - * "xy" ordering: :math:`[(0, 3), (1, 3), (2, 3), (0, 4), (1, 4), (2, 4)]` - * "yx" ordering: :math:`[(0, 3), (0, 4), (1, 3), (1, 4), (2, 3), (2, 4)]` - """ - - -@configclass -class PinholeCameraPatternCfg(PatternBaseCfg): - """Configuration for a pinhole camera depth image pattern for ray-casting. - - .. caution:: - Focal length as well as the aperture sizes and offsets are set as a tenth of the world unit. In our case, the - world unit is meters, so all of these values are in cm. For more information, please check: - https://docs.omniverse.nvidia.com/materials-and-rendering/latest/cameras.html - """ - - func: Callable = patterns.pinhole_camera_pattern - - focal_length: float = 24.0 - """Perspective focal length (in cm). Defaults to 24.0 cm. - - Longer focal lengths give a narrower FOV; shorter focal lengths give a wider FOV. - """ - - horizontal_aperture: float = 20.955 - """Horizontal aperture (in cm). Defaults to 20.955 cm. - - Emulates sensor/film width on a camera. - - Note: - The default value is the horizontal aperture of a 35 mm spherical projector. - """ - vertical_aperture: float | None = None - r"""Vertical aperture (in cm). Defaults to None. - - Emulates sensor/film height on a camera. If None, then the vertical aperture is calculated based on the - horizontal aperture and the aspect ratio of the image to maintain square pixels. In this case, the vertical - aperture is calculated as: - - .. math:: - \text{vertical aperture} = \text{horizontal aperture} \times \frac{\text{height}}{\text{width}} - """ - - horizontal_aperture_offset: float = 0.0 - """Offsets Resolution/Film gate horizontally. Defaults to 0.0.""" - - vertical_aperture_offset: float = 0.0 - """Offsets Resolution/Film gate vertically. Defaults to 0.0.""" - - width: int = MISSING - """Width of the image (in pixels).""" - - height: int = MISSING - """Height of the image (in pixels).""" - - @classmethod - def from_intrinsic_matrix( - cls, - intrinsic_matrix: list[float], - width: int, - height: int, - focal_length: float = 24.0, - ) -> PinholeCameraPatternCfg: - r"""Create a :class:`PinholeCameraPatternCfg` class instance from an intrinsic matrix. - - The intrinsic matrix is a 3x3 matrix that defines the mapping between the 3D world coordinates and - the 2D image. The matrix is defined as: - - .. math:: - I_{cam} = \begin{bmatrix} - f_x & 0 & c_x \\ - 0 & f_y & c_y \\ - 0 & 0 & 1 - \end{bmatrix}, - - where :math:`f_x` and :math:`f_y` are the focal length along x and y direction, while - :math:`c_x` and :math:`c_y` are the principal point offsets along x and y direction, respectively. - - Args: - intrinsic_matrix: Intrinsic matrix of the camera in row-major format. - The matrix is defined as [f_x, 0, c_x, 0, f_y, c_y, 0, 0, 1]. Shape is (9,). - width: Width of the image (in pixels). - height: Height of the image (in pixels). - focal_length: Focal length of the camera (in cm). Defaults to 24.0 cm. - - Returns: - An instance of the :class:`PinholeCameraPatternCfg` class.
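A short usage sketch with illustrative numbers (a 640x480 image, 600 px focal lengths, and a principal point at the image center), following the conversion formulas in the method body below:

.. code-block:: python

    K = [600.0, 0.0, 320.0, 0.0, 600.0, 240.0, 0.0, 0.0, 1.0]
    pattern_cfg = PinholeCameraPatternCfg.from_intrinsic_matrix(intrinsic_matrix=K, width=640, height=480)
    # with the default focal_length of 24.0 cm:
    #   horizontal_aperture = 640 * 24.0 / 600 = 25.6 cm
    #   vertical_aperture   = 480 * 24.0 / 600 = 19.2 cm
    #   both aperture offsets resolve to 0.0 since the principal point is centered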
- """ - # extract parameters from matrix - f_x = intrinsic_matrix[0] - c_x = intrinsic_matrix[2] - f_y = intrinsic_matrix[4] - c_y = intrinsic_matrix[5] - # resolve parameters for usd camera - horizontal_aperture = width * focal_length / f_x - vertical_aperture = height * focal_length / f_y - horizontal_aperture_offset = (c_x - width / 2) / f_x - vertical_aperture_offset = (c_y - height / 2) / f_y - - return cls( - focal_length=focal_length, - horizontal_aperture=horizontal_aperture, - vertical_aperture=vertical_aperture, - horizontal_aperture_offset=horizontal_aperture_offset, - vertical_aperture_offset=vertical_aperture_offset, - width=width, - height=height, - ) - - -@configclass -class BpearlPatternCfg(PatternBaseCfg): - """Configuration for the Bpearl pattern for ray-casting.""" - - func: Callable = patterns.bpearl_pattern - - horizontal_fov: float = 360.0 - """Horizontal field of view (in degrees). Defaults to 360.0.""" - - horizontal_res: float = 10.0 - """Horizontal resolution (in degrees). Defaults to 10.0.""" - - # fmt: off - vertical_ray_angles: Sequence[float] = [ - 89.5, 86.6875, 83.875, 81.0625, 78.25, 75.4375, 72.625, 69.8125, 67.0, 64.1875, 61.375, - 58.5625, 55.75, 52.9375, 50.125, 47.3125, 44.5, 41.6875, 38.875, 36.0625, 33.25, 30.4375, - 27.625, 24.8125, 22, 19.1875, 16.375, 13.5625, 10.75, 7.9375, 5.125, 2.3125 - ] - # fmt: on - """Vertical ray angles (in degrees). Defaults to a list of 32 angles. - - Note: - We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles - are not evenly spaced. - """ - - -@configclass -class LidarPatternCfg(PatternBaseCfg): - """Configuration for the LiDAR pattern for ray-casting.""" - - func: Callable = patterns.lidar_pattern - - channels: int = MISSING - """Number of Channels (Beams). Determines the vertical resolution of the LiDAR sensor.""" - - vertical_fov_range: tuple[float, float] = MISSING - """Vertical field of view range in degrees.""" - - horizontal_fov_range: tuple[float, float] = MISSING - """Horizontal field of view range in degrees.""" - - horizontal_res: float = MISSING - """Horizontal resolution (in degrees).""" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py deleted file mode 100644 index 543276e8ea2..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_cast_utils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Utility functions for ray-cast sensors.""" - -from __future__ import annotations - -import torch - -import omni.physics.tensors.impl.api as physx - -from isaaclab.sim.views import XformPrimView -from isaaclab.utils.math import convert_quat - - -def obtain_world_pose_from_view( - physx_view: XformPrimView | physx.ArticulationView | physx.RigidBodyView, - env_ids: torch.Tensor, - clone: bool = False, -) -> tuple[torch.Tensor, torch.Tensor]: - """Get the world poses of the prim referenced by the prim view. - - Args: - physx_view: The prim view to get the world poses from. - env_ids: The environment ids of the prims to get the world poses for. - clone: Whether to clone the returned tensors (default: False). - - Returns: - A tuple containing the world positions and orientations of the prims. - Orientation is in (w, x, y, z) format. 
- - Raises: - NotImplementedError: If the prim view is not of the supported type. - """ - if isinstance(physx_view, XformPrimView): - pos_w, quat_w = physx_view.get_world_poses(env_ids) - elif isinstance(physx_view, physx.ArticulationView): - pos_w, quat_w = physx_view.get_root_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = convert_quat(quat_w, to="wxyz") - elif isinstance(physx_view, physx.RigidBodyView): - pos_w, quat_w = physx_view.get_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = convert_quat(quat_w, to="wxyz") - else: - raise NotImplementedError(f"Cannot get world poses for prim view of type '{type(physx_view)}'.") - - if clone: - return pos_w.clone(), quat_w.clone() - else: - return pos_w, quat_w diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py deleted file mode 100644 index e6735a9f481..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -import re -from collections.abc import Sequence -from typing import TYPE_CHECKING, ClassVar - -import numpy as np -import torch -import warp as wp - -import omni -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.sim.views import XformPrimView -from isaaclab.terrains.trimesh.utils import make_plane -from isaaclab.utils.math import quat_apply, quat_apply_yaw -from isaaclab.utils.warp import convert_to_warp_mesh, raycast_mesh - -from ..sensor_base import SensorBase -from .ray_cast_utils import obtain_world_pose_from_view -from .ray_caster_data import RayCasterData - -if TYPE_CHECKING: - from .ray_caster_cfg import RayCasterCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RayCaster(SensorBase): - """A ray-casting sensor. - - The ray-caster uses a set of rays to detect collisions with meshes in the scene. The rays are - defined in the sensor's local coordinate frame. The sensor can be configured to ray-cast against - a set of meshes with a given ray pattern. - - The meshes are parsed from the list of primitive paths provided in the configuration. These are then - converted to warp meshes and stored in the `warp_meshes` list. The ray-caster then ray-casts against - these warp meshes using the ray pattern provided in the configuration. - - .. note:: - Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes - is a work in progress. - """ - - cfg: RayCasterCfg - """The configuration parameters.""" - - # Class variables to share meshes across instances - meshes: ClassVar[dict[str, wp.Mesh]] = {} - """A dictionary to store warp meshes for raycasting, shared across all instances. - - The keys correspond to the prim path for the meshes, and values are the corresponding warp Mesh objects.""" - _instance_count: ClassVar[int] = 0 - """A counter to track the number of RayCaster instances, used to manage class variable lifecycle.""" - - def __init__(self, cfg: RayCasterCfg): - """Initializes the ray-caster object. - - Args: - cfg: The configuration parameters. 
- """ - RayCaster._instance_count += 1 - # check if sensor path is valid - # note: currently we do not handle environment indices if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Sensor_[1,2]". - sensor_path = cfg.prim_path.split("/")[-1] - sensor_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", sensor_path) is None - if sensor_path_is_regex: - raise RuntimeError( - f"Invalid prim path for the ray-caster sensor: {cfg.prim_path}." - "\n\tHint: Please ensure that the prim path does not contain any regex patterns in the leaf." - ) - # Initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data = RayCasterData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Ray-caster @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of meshes : {len(RayCaster.meshes)}\n" - f"\tnumber of sensors : {self._view.count}\n" - f"\tnumber of rays/sensor: {self.num_rays}\n" - f"\ttotal number of rays : {self.num_rays * self._view.count}" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self._view.count - - @property - def data(self) -> RayCasterData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - """ - Operations. - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - num_envs_ids = self._view.count - else: - num_envs_ids = len(env_ids) - # resample the drift - r = torch.empty(num_envs_ids, 3, device=self.device) - self.drift[env_ids] = r.uniform_(*self.cfg.drift_range) - # resample the height drift - range_list = [self.cfg.ray_cast_drift_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] - ranges = torch.tensor(range_list, device=self.device) - self.ray_cast_drift[env_ids] = math_utils.sample_uniform( - ranges[:, 0], ranges[:, 1], (num_envs_ids, 3), device=self.device - ) - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - # obtain global simulation view - - self._physics_sim_view = SimulationManager.get_physics_sim_view() - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - available_prims = ",".join([str(p.GetPath()) for p in sim_utils.get_current_stage().Traverse()]) - raise RuntimeError( - f"Failed to find a prim at path expression: {self.cfg.prim_path}. Available prims: {available_prims}" - ) - - self._view, self._offset = self._obtain_trackable_prim_view(self.cfg.prim_path) - - # load the meshes by parsing the stage - self._initialize_warp_meshes() - # initialize the ray start and directions - self._initialize_rays_impl() - - def _initialize_warp_meshes(self): - # check number of mesh prims provided - if len(self.cfg.mesh_prim_paths) != 1: - raise NotImplementedError( - f"RayCaster currently only supports one mesh prim. 
Received: {len(self.cfg.mesh_prim_paths)}" - ) - - # read prims to ray-cast - for mesh_prim_path in self.cfg.mesh_prim_paths: - # check if mesh already casted into warp mesh - if mesh_prim_path in RayCaster.meshes: - continue - - # check if the prim is a plane - handle PhysX plane as a special case - # if a plane exists then we need to create an infinite mesh that is a plane - mesh_prim = sim_utils.get_first_matching_child_prim( - mesh_prim_path, lambda prim: prim.GetTypeName() == "Plane" - ) - # if we did not find a plane then we need to read the mesh - if mesh_prim is None: - # obtain the mesh prim - mesh_prim = sim_utils.get_first_matching_child_prim( - mesh_prim_path, lambda prim: prim.GetTypeName() == "Mesh" - ) - # check if valid - if mesh_prim is None or not mesh_prim.IsValid(): - raise RuntimeError(f"Invalid mesh prim path: {mesh_prim_path}") - # cast into UsdGeomMesh - mesh_prim = UsdGeom.Mesh(mesh_prim) - # read the vertices and faces - points = np.asarray(mesh_prim.GetPointsAttr().Get()) - transform_matrix = np.array(omni.usd.get_world_transform_matrix(mesh_prim)).T - points = np.matmul(points, transform_matrix[:3, :3].T) - points += transform_matrix[:3, 3] - indices = np.asarray(mesh_prim.GetFaceVertexIndicesAttr().Get()) - wp_mesh = convert_to_warp_mesh(points, indices, device=self.device) - # print info - logger.info( - f"Read mesh prim: {mesh_prim.GetPath()} with {len(points)} vertices and {len(indices)} faces." - ) - else: - mesh = make_plane(size=(2e6, 2e6), height=0.0, center_zero=True) - wp_mesh = convert_to_warp_mesh(mesh.vertices, mesh.faces, device=self.device) - # print info - logger.info(f"Created infinite plane mesh prim: {mesh_prim.GetPath()}.") - # add the warp mesh to the list - RayCaster.meshes[mesh_prim_path] = wp_mesh - - # throw an error if no meshes are found - if all([mesh_prim_path not in RayCaster.meshes for mesh_prim_path in self.cfg.mesh_prim_paths]): - raise RuntimeError( - f"No meshes found for ray-casting! 
Please check the mesh prim paths: {self.cfg.mesh_prim_paths}" - ) - - def _initialize_rays_impl(self): - # compute ray stars and directions - self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func(self.cfg.pattern_cfg, self._device) - self.num_rays = len(self.ray_directions) - # apply offset transformation to the rays - offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device) - offset_quat = torch.tensor(list(self.cfg.offset.rot), device=self._device) - self.ray_directions = quat_apply(offset_quat.repeat(len(self.ray_directions), 1), self.ray_directions) - self.ray_starts += offset_pos - # repeat the rays for each sensor - self.ray_starts = self.ray_starts.repeat(self._view.count, 1, 1) - self.ray_directions = self.ray_directions.repeat(self._view.count, 1, 1) - # prepare drift - self.drift = torch.zeros(self._view.count, 3, device=self.device) - self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) - # fill the data buffer - self._data.pos_w = torch.zeros(self._view.count, 3, device=self.device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self.device) - self._data.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_starts_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - self._ray_directions_w = torch.zeros(self._view.count, self.num_rays, 3, device=self.device) - - def _update_ray_infos(self, env_ids: Sequence[int]): - """Updates the ray information buffers.""" - - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset[0][env_ids], self._offset[1][env_ids] - ) - # apply drift to ray starting position in world frame - pos_w += self.drift[env_ids] - # store the poses - self._data.pos_w[env_ids] = pos_w - self._data.quat_w[env_ids] = quat_w - - # check if user provided attach_yaw_only flag - if self.cfg.attach_yaw_only is not None: - msg = ( - "Raycaster attribute 'attach_yaw_only' property will be deprecated in a future release." - " Please use the parameter 'ray_alignment' instead." - ) - # set ray alignment to yaw - if self.cfg.attach_yaw_only: - self.cfg.ray_alignment = "yaw" - msg += " Setting ray_alignment to 'yaw'." - else: - self.cfg.ray_alignment = "base" - msg += " Setting ray_alignment to 'base'." 
- # log the warning - logger.warning(msg) - # ray cast based on the sensor poses - if self.cfg.ray_alignment == "world": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += self.ray_cast_drift[env_ids, 0:2] - # no rotation is considered and directions are not rotated - ray_starts_w = self.ray_starts[env_ids] - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = self.ray_directions[env_ids] - elif self.cfg.ray_alignment == "yaw": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += quat_apply_yaw(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] - # only yaw orientation is considered and directions are not rotated - ray_starts_w = quat_apply_yaw(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = self.ray_directions[env_ids] - elif self.cfg.ray_alignment == "base": - # apply horizontal drift to ray starting position in ray caster frame - pos_w[:, 0:2] += quat_apply(quat_w, self.ray_cast_drift[env_ids])[:, 0:2] - # full orientation is considered - ray_starts_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) - else: - raise RuntimeError(f"Unsupported ray_alignment type: {self.cfg.ray_alignment}.") - - self._ray_starts_w[env_ids] = ray_starts_w - self._ray_directions_w[env_ids] = ray_directions_w - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - self._update_ray_infos(env_ids) - - # ray cast and store the hits - # TODO: Make this work for multiple meshes? - self._data.ray_hits_w[env_ids] = raycast_mesh( - self._ray_starts_w[env_ids], - self._ray_directions_w[env_ids], - max_dist=self.cfg.max_distance, - mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], - )[0] - - # apply vertical drift to ray starting position in ray caster frame - self._data.ray_hits_w[env_ids, :, 2] += self.ray_cast_drift[env_ids, 2].unsqueeze(-1) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - if not hasattr(self, "ray_visualizer"): - self.ray_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.ray_visualizer.set_visibility(True) - else: - if hasattr(self, "ray_visualizer"): - self.ray_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - if self._data.ray_hits_w is None: - return - # remove possible inf values - viz_points = self._data.ray_hits_w.reshape(-1, 3) - viz_points = viz_points[~torch.any(torch.isinf(viz_points), dim=1)] - - self.ray_visualizer.visualize(viz_points) - - """ - Internal Helpers. - """ - - def _obtain_trackable_prim_view( - self, target_prim_path: str - ) -> tuple[XformPrimView | any, tuple[torch.Tensor, torch.Tensor]]: - """Obtain a prim view that can be used to track the pose of the parget prim. - - The target prim path is a regex expression that matches one or more mesh prims. While we can track its - pose directly using XFormPrim, this is not efficient and can be slow. Instead, we create a prim view - using the physics simulation view, which provides a more efficient way to track the pose of the mesh prims. - - The function additionally resolves the relative pose between the mesh and its corresponding physics prim. 
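# The three `ray_alignment` modes handled above differ only in how much of the sensor
# orientation is applied to the ray pattern before casting: "world" keeps the local ray frame
# and only shifts by the sensor position, "yaw" rotates by the yaw component only (useful for
# height maps), and "base" rotates both starts and directions by the full orientation.
# A minimal sketch for a single sensor, with `quat_apply` / `quat_apply_yaw` passed in as
# stand-ins for the isaaclab.utils.math helpers (assumed shapes: pos_w (3,), quat_w (4,),
# ray_starts and ray_dirs (B, 3)); drift handling is omitted:

def align_rays(alignment, pos_w, quat_w, ray_starts, ray_dirs, quat_apply, quat_apply_yaw):
    if alignment == "world":
        return ray_starts + pos_w, ray_dirs
    if alignment == "yaw":
        return quat_apply_yaw(quat_w, ray_starts) + pos_w, ray_dirs
    if alignment == "base":
        return quat_apply(quat_w, ray_starts) + pos_w, quat_apply(quat_w, ray_dirs)
    raise ValueError(f"Unsupported ray_alignment: {alignment}")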
- This is especially useful if the mesh is not directly parented to the physics prim. - - Args: - target_prim_path: The target prim path to obtain the prim view for. - - Returns: - A tuple containing: - - - An XFormPrim or a physics prim view (ArticulationView or RigidBodyView). - - A tuple containing the positions and orientations of the mesh prims in the physics prim frame. - - """ - - mesh_prim = sim_utils.find_first_matching_prim(target_prim_path) - current_prim = mesh_prim - current_path_expr = target_prim_path - - prim_view = None - - while prim_view is None: - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.ArticulationRootAPI): - prim_view = self._physics_sim_view.create_articulation_view(current_path_expr.replace(".*", "*")) - logger.info(f"Created articulation view for mesh prim at path: {target_prim_path}") - break - - # TODO: Need to handle the case where API is present but it is disabled - if current_prim.HasAPI(UsdPhysics.RigidBodyAPI): - prim_view = self._physics_sim_view.create_rigid_body_view(current_path_expr.replace(".*", "*")) - logger.info(f"Created rigid body view for mesh prim at path: {target_prim_path}") - break - - new_root_prim = current_prim.GetParent() - current_path_expr = current_path_expr.rsplit("/", 1)[0] - if not new_root_prim.IsValid(): - prim_view = XformPrimView(target_prim_path, device=self._device, stage=self.stage) - current_path_expr = target_prim_path - logger.warning( - f"The prim at path {target_prim_path} which is used for raycasting is not a physics prim." - " Defaulting to XFormPrim. \n The pose of the mesh will most likely not" - " be updated correctly when running in headless mode and position lookups will be much slower. \n" - " If possible, ensure that the mesh or its parent is a physics prim (rigid body or articulation)." - ) - break - - # switch the current prim to the parent prim - current_prim = new_root_prim - - # obtain the relative transforms between target prim and the view prims - mesh_prims = sim_utils.find_matching_prims(target_prim_path) - view_prims = sim_utils.find_matching_prims(current_path_expr) - if len(mesh_prims) != len(view_prims): - raise RuntimeError( - f"The number of mesh prims ({len(mesh_prims)}) does not match the number of physics prims" - f" ({len(view_prims)})Please specify the correct mesh and physics prim paths more" - " specifically in your target expressions." - ) - positions = [] - quaternions = [] - for mesh_prim, view_prim in zip(mesh_prims, view_prims): - pos, orientation = sim_utils.resolve_prim_pose(mesh_prim, view_prim) - positions.append(torch.tensor(pos, dtype=torch.float32, device=self.device)) - quaternions.append(torch.tensor(orientation, dtype=torch.float32, device=self.device)) - - positions = torch.stack(positions).to(device=self.device, dtype=torch.float32) - quaternions = torch.stack(quaternions).to(device=self.device, dtype=torch.float32) - - return prim_view, (positions, quaternions) - - """ - Internal simulation callbacks. 
- """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._view = None - - def __del__(self): - RayCaster._instance_count -= 1 - if RayCaster._instance_count == 0: - RayCaster.meshes.clear() diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py deleted file mode 100644 index e930d3df183..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -from collections.abc import Sequence -from typing import TYPE_CHECKING, ClassVar, Literal - -import torch - -from pxr import UsdGeom - -import isaaclab.utils.math as math_utils -from isaaclab.sensors.camera import CameraData -from isaaclab.utils.warp import raycast_mesh - -from .ray_cast_utils import obtain_world_pose_from_view -from .ray_caster import RayCaster - -if TYPE_CHECKING: - from .ray_caster_camera_cfg import RayCasterCameraCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RayCasterCamera(RayCaster): - """A ray-casting camera sensor. - - The ray-caster camera uses a set of rays to get the distances to meshes in the scene. The rays are - defined in the sensor's local coordinate frame. The sensor has the same interface as the - :class:`isaaclab.sensors.Camera` that implements the camera class through USD camera prims. - However, this class provides a faster image generation. The sensor converts meshes from the list of - primitive paths provided in the configuration to Warp meshes. The camera then ray-casts against these - Warp meshes only. - - Currently, only the following annotators are supported: - - - ``"distance_to_camera"``: An image containing the distance to camera optical center. - - ``"distance_to_image_plane"``: An image containing distances of 3D points from camera plane along camera's z-axis. - - ``"normals"``: An image containing the local surface normal vectors at each pixel. - - .. note:: - Currently, only static meshes are supported. Extending the warp mesh to support dynamic meshes - is a work in progress. - """ - - cfg: RayCasterCameraCfg - """The configuration parameters.""" - UNSUPPORTED_TYPES: ClassVar[set[str]] = { - "rgb", - "instance_id_segmentation", - "instance_id_segmentation_fast", - "instance_segmentation", - "instance_segmentation_fast", - "semantic_segmentation", - "skeleton_data", - "motion_vectors", - "bounding_box_2d_tight", - "bounding_box_2d_tight_fast", - "bounding_box_2d_loose", - "bounding_box_2d_loose_fast", - "bounding_box_3d", - "bounding_box_3d_fast", - } - """A set of sensor types that are not supported by the ray-caster camera.""" - - def __init__(self, cfg: RayCasterCameraCfg): - """Initializes the camera object. - - Args: - cfg: The configuration parameters. - - Raises: - ValueError: If the provided data types are not supported by the ray-caster camera. 
- """ - # perform check on supported data types - self._check_supported_data_types(cfg) - # initialize base class - super().__init__(cfg) - # create empty variables for storing output data - self._data = CameraData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Ray-Caster-Camera @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of meshes : {len(RayCaster.meshes)}\n" - f"\tnumber of sensors : {self._view.count}\n" - f"\tnumber of rays/sensor: {self.num_rays}\n" - f"\ttotal number of rays : {self.num_rays * self._view.count}\n" - f"\timage shape : {self.image_shape}" - ) - - """ - Properties - """ - - @property - def data(self) -> CameraData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def image_shape(self) -> tuple[int, int]: - """A tuple containing (height, width) of the camera sensor.""" - return (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width) - - @property - def frame(self) -> torch.tensor: - """Frame number when the measurement took place.""" - return self._frame - - """ - Operations. - """ - - def set_intrinsic_matrices( - self, matrices: torch.Tensor, focal_length: float = 1.0, env_ids: Sequence[int] | None = None - ): - """Set the intrinsic matrix of the camera. - - Args: - matrices: The intrinsic matrices for the camera. Shape is (N, 3, 3). - focal_length: Focal length to use when computing aperture values (in cm). Defaults to 1.0. - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. - """ - # resolve env_ids - if env_ids is None: - env_ids = slice(None) - # save new intrinsic matrices and focal length - self._data.intrinsic_matrices[env_ids] = matrices.to(self._device) - self._focal_length = focal_length - # recompute ray directions - self.ray_starts[env_ids], self.ray_directions[env_ids] = self.cfg.pattern_cfg.func( - self.cfg.pattern_cfg, self._data.intrinsic_matrices[env_ids], self._device - ) - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None or isinstance(env_ids, slice): - env_ids = self._ALL_INDICES - # reset the data - # note: this recomputation is useful if one performs events such as randomizations on the camera poses. - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] - ) - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w - # Reset the frame count - self._frame[env_ids] = 0 - - def set_world_poses( - self, - positions: torch.Tensor | None = None, - orientations: torch.Tensor | None = None, - env_ids: Sequence[int] | None = None, - convention: Literal["opengl", "ros", "world"] = "ros", - ): - """Set the pose of the camera w.r.t. the world frame using specified convention. - - Since different fields use different conventions for camera orientations, the method allows users to - set the camera poses in the specified convention. 
Possible conventions are: - - - :obj:`"opengl"` - forward axis: -Z - up axis +Y - Offset is applied in the OpenGL (Usd.Camera) convention - - :obj:`"ros"` - forward axis: +Z - up axis -Y - Offset is applied in the ROS convention - - :obj:`"world"` - forward axis: +X - up axis +Z - Offset is applied in the World Frame convention - - See :meth:`isaaclab.utils.maths.convert_camera_frame_orientation_convention` for more details - on the conventions. - - Args: - positions: The cartesian coordinates (in meters). Shape is (N, 3). - Defaults to None, in which case the camera position in not changed. - orientations: The quaternion orientation in (w, x, y, z). Shape is (N, 4). - Defaults to None, in which case the camera orientation in not changed. - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. - convention: The convention in which the poses are fed. Defaults to "ros". - - Raises: - RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. - """ - # resolve env_ids - if env_ids is None or isinstance(env_ids, slice): - env_ids = self._ALL_INDICES - - # get current positions - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids) - if positions is not None: - # transform to camera frame - pos_offset_world_frame = positions - pos_w - self._offset_pos[env_ids] = math_utils.quat_apply(math_utils.quat_inv(quat_w), pos_offset_world_frame) - if orientations is not None: - # convert rotation matrix from input convention to world - quat_w_set = math_utils.convert_camera_frame_orientation_convention( - orientations, origin=convention, target="world" - ) - self._offset_quat[env_ids] = math_utils.quat_mul(math_utils.quat_inv(quat_w), quat_w_set) - - # update the data - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] - ) - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w - - def set_world_poses_from_view( - self, eyes: torch.Tensor, targets: torch.Tensor, env_ids: Sequence[int] | None = None - ): - """Set the poses of the camera from the eye position and look-at target position. - - Args: - eyes: The positions of the camera's eye. Shape is N, 3). - targets: The target locations to look at. Shape is (N, 3). - env_ids: A sensor ids to manipulate. Defaults to None, which means all sensor indices. - - Raises: - RuntimeError: If the camera prim is not set. Need to call :meth:`initialize` method first. - NotImplementedError: If the stage up-axis is not "Y" or "Z". - """ - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # camera position and rotation in opengl convention - orientations = math_utils.quat_from_matrix( - math_utils.create_rotation_matrix_from_view(eyes, targets, up_axis=up_axis, device=self._device) - ) - self.set_world_poses(eyes, orientations, env_ids, convention="opengl") - - """ - Implementation. 
- """ - - def _initialize_rays_impl(self): - # Create all indices buffer - self._ALL_INDICES = torch.arange(self._view.count, device=self._device, dtype=torch.long) - # Create frame count buffer - self._frame = torch.zeros(self._view.count, device=self._device, dtype=torch.long) - # create buffers - self._create_buffers() - # compute intrinsic matrices - self._compute_intrinsic_matrices() - # compute ray stars and directions - self.ray_starts, self.ray_directions = self.cfg.pattern_cfg.func( - self.cfg.pattern_cfg, self._data.intrinsic_matrices, self._device - ) - self.num_rays = self.ray_directions.shape[1] - # create buffer to store ray hits - self.ray_hits_w = torch.zeros(self._view.count, self.num_rays, 3, device=self._device) - # set offsets - quat_w = math_utils.convert_camera_frame_orientation_convention( - torch.tensor([self.cfg.offset.rot], device=self._device), origin=self.cfg.offset.convention, target="world" - ) - self._offset_quat = quat_w.repeat(self._view.count, 1) - self._offset_pos = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # increment frame count - self._frame[env_ids] += 1 - - # compute poses from current view - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] - ) - # update the data - self._data.pos_w[env_ids] = pos_w - self._data.quat_w_world[env_ids] = quat_w - - # note: full orientation is considered - ray_starts_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_starts[env_ids]) - ray_starts_w += pos_w.unsqueeze(1) - ray_directions_w = math_utils.quat_apply(quat_w.repeat(1, self.num_rays), self.ray_directions[env_ids]) - - # ray cast and store the hits - # note: we set max distance to 1e6 during the ray-casting. THis is because we clip the distance - # to the image plane and distance to the camera to the maximum distance afterwards in-order to - # match the USD camera behavior. - - # TODO: Make ray-casting work for multiple meshes? - # necessary for regular dictionaries. 
- self.ray_hits_w, ray_depth, ray_normal, _ = raycast_mesh( - ray_starts_w, - ray_directions_w, - mesh=RayCaster.meshes[self.cfg.mesh_prim_paths[0]], - max_dist=1e6, - return_distance=any( - [name in self.cfg.data_types for name in ["distance_to_image_plane", "distance_to_camera"]] - ), - return_normal="normals" in self.cfg.data_types, - ) - # update output buffers - if "distance_to_image_plane" in self.cfg.data_types: - # note: data is in camera frame so we only take the first component (z-axis of camera frame) - distance_to_image_plane = ( - math_utils.quat_apply( - math_utils.quat_inv(quat_w).repeat(1, self.num_rays), - (ray_depth[:, :, None] * ray_directions_w), - ) - )[:, :, 0] - # apply the maximum distance after the transformation - if self.cfg.depth_clipping_behavior == "max": - distance_to_image_plane = torch.clip(distance_to_image_plane, max=self.cfg.max_distance) - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = self.cfg.max_distance - elif self.cfg.depth_clipping_behavior == "zero": - distance_to_image_plane[distance_to_image_plane > self.cfg.max_distance] = 0.0 - distance_to_image_plane[torch.isnan(distance_to_image_plane)] = 0.0 - self._data.output["distance_to_image_plane"][env_ids] = distance_to_image_plane.view( - -1, *self.image_shape, 1 - ) - - if "distance_to_camera" in self.cfg.data_types: - if self.cfg.depth_clipping_behavior == "max": - ray_depth = torch.clip(ray_depth, max=self.cfg.max_distance) - elif self.cfg.depth_clipping_behavior == "zero": - ray_depth[ray_depth > self.cfg.max_distance] = 0.0 - self._data.output["distance_to_camera"][env_ids] = ray_depth.view(-1, *self.image_shape, 1) - - if "normals" in self.cfg.data_types: - self._data.output["normals"][env_ids] = ray_normal.view(-1, *self.image_shape, 3) - - def _debug_vis_callback(self, event): - # in case it crashes be safe - if not hasattr(self, "ray_hits_w"): - return - # show ray hit positions - self.ray_visualizer.visualize(self.ray_hits_w.view(-1, 3)) - - """ - Private Helpers - """ - - def _check_supported_data_types(self, cfg: RayCasterCameraCfg): - """Checks if the data types are supported by the ray-caster camera.""" - # check if there is any intersection in unsupported types - # reason: we cannot obtain this data from simplified warp-based ray caster - common_elements = set(cfg.data_types) & RayCasterCamera.UNSUPPORTED_TYPES - if common_elements: - raise ValueError( - f"RayCasterCamera class does not support the following sensor types: {common_elements}." - "\n\tThis is because these sensor types cannot be obtained in a fast way using ''warp''." - "\n\tHint: If you need to work with these sensor types, we recommend using the USD camera" - " interface from the isaaclab.sensors.camera module." - ) - - def _create_buffers(self): - """Create buffers for storing data.""" - # prepare drift - self.drift = torch.zeros(self._view.count, 3, device=self.device) - self.ray_cast_drift = torch.zeros(self._view.count, 3, device=self.device) - # create the data object - # -- pose of the cameras - self._data.pos_w = torch.zeros((self._view.count, 3), device=self._device) - self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) - # -- intrinsic matrix - self._data.intrinsic_matrices = torch.zeros((self._view.count, 3, 3), device=self._device) - self._data.intrinsic_matrices[:, 2, 2] = 1.0 - self._data.image_shape = self.image_shape - # -- output data - # create the buffers to store the annotator data. 
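# In the buffer update above, "distance_to_camera" is the range along each ray, while
# "distance_to_image_plane" is that range projected onto the camera's forward axis. With unit
# ray directions expressed in the sensor frame, the projection reduces to a dot product with
# the forward axis (+X in the "world" camera convention used here). A small sketch with plain
# torch, ignoring the clipping behavior:

import torch

def depth_to_image_plane(ray_depth: torch.Tensor, ray_dirs_sensor: torch.Tensor) -> torch.Tensor:
    """ray_depth: (N, B); ray_dirs_sensor: (N, B, 3) unit directions in the sensor frame."""
    forward = torch.tensor([1.0, 0.0, 0.0])
    return ray_depth * (ray_dirs_sensor @ forward)

# A ray 45 degrees off the optical axis with range sqrt(2) has an image-plane distance of 1.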
- self._data.output = {} - self._data.info = [{name: None for name in self.cfg.data_types}] * self._view.count - for name in self.cfg.data_types: - if name in ["distance_to_image_plane", "distance_to_camera"]: - shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 1) - elif name in ["normals"]: - shape = (self.cfg.pattern_cfg.height, self.cfg.pattern_cfg.width, 3) - else: - raise ValueError(f"Received unknown data type: {name}. Please check the configuration.") - # allocate tensor to store the data - self._data.output[name] = torch.zeros((self._view.count, *shape), device=self._device) - - def _compute_intrinsic_matrices(self): - """Computes the intrinsic matrices for the camera based on the config provided.""" - # get the sensor properties - pattern_cfg = self.cfg.pattern_cfg - - # check if vertical aperture is provided - # if not then it is auto-computed based on the aspect ratio to preserve squared pixels - if pattern_cfg.vertical_aperture is None: - pattern_cfg.vertical_aperture = pattern_cfg.horizontal_aperture * pattern_cfg.height / pattern_cfg.width - - # compute the intrinsic matrix - f_x = pattern_cfg.width * pattern_cfg.focal_length / pattern_cfg.horizontal_aperture - f_y = pattern_cfg.height * pattern_cfg.focal_length / pattern_cfg.vertical_aperture - c_x = pattern_cfg.horizontal_aperture_offset * f_x + pattern_cfg.width / 2 - c_y = pattern_cfg.vertical_aperture_offset * f_y + pattern_cfg.height / 2 - # allocate the intrinsic matrices - self._data.intrinsic_matrices[:, 0, 0] = f_x - self._data.intrinsic_matrices[:, 0, 2] = c_x - self._data.intrinsic_matrices[:, 1, 1] = f_y - self._data.intrinsic_matrices[:, 1, 2] = c_y - - # save focal length - self._focal_length = pattern_cfg.focal_length - - def _compute_view_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: - """Obtains the pose of the view the camera is attached to in the world frame. - - .. deprecated v2.3.1: - This function will be removed in a future release in favor of implementation - :meth:`obtain_world_pose_from_view`. - - Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z). - - - """ - # deprecation - logger.warning( - "The function '_compute_view_world_poses' will be deprecated in favor of the util method" - " 'obtain_world_pose_from_view'. Please use 'obtain_world_pose_from_view' instead...." - ) - - return obtain_world_pose_from_view(self._view, env_ids, clone=True) - - def _compute_camera_world_poses(self, env_ids: Sequence[int]) -> tuple[torch.Tensor, torch.Tensor]: - """Computes the pose of the camera in the world frame. - - This function applies the offset pose to the pose of the view the camera is attached to. - - .. deprecated v2.3.1: - This function will be removed in a future release. Instead, use the code block below: - - .. code-block:: python - - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - pos_w, quat_w = math_utils.combine_frame_transforms( - pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids] - ) - - Returns: - A tuple of the position (in meters) and quaternion (w, x, y, z) in "world" convention. - """ - - # deprecation - logger.warning( - "The function '_compute_camera_world_poses' will be deprecated in favor of the combination of methods" - " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms'. Please use" - " 'obtain_world_pose_from_view' and 'math_utils.combine_frame_transforms' instead...." 
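# `_compute_intrinsic_matrices` above builds the pinhole matrix K from the USD camera
# description (focal length, apertures, aperture offsets), auto-deriving the vertical
# aperture when it is not given so that pixels stay square. The same construction in
# isolation (hypothetical helper name, plain torch):

import torch

def intrinsic_matrix(width, height, focal_length, h_aperture, v_aperture=None, h_offset=0.0, v_offset=0.0):
    if v_aperture is None:  # preserve square pixels when only the horizontal aperture is given
        v_aperture = h_aperture * height / width
    f_x = width * focal_length / h_aperture
    f_y = height * focal_length / v_aperture
    c_x = h_offset * f_x + width / 2
    c_y = v_offset * f_y + height / 2
    K = torch.eye(3)
    K[0, 0], K[0, 2], K[1, 1], K[1, 2] = f_x, c_x, f_y, c_y
    return K

# print(intrinsic_matrix(640, 480, focal_length=24.0, h_aperture=20.955))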
- ) - - # get the pose of the view the camera is attached to - pos_w, quat_w = obtain_world_pose_from_view(self._view, env_ids, clone=True) - return math_utils.combine_frame_transforms(pos_w, quat_w, self._offset_pos[env_ids], self._offset_quat[env_ids]) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py deleted file mode 100644 index 604c586adcc..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_camera_cfg.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Configuration for the ray-cast camera sensor.""" - -from dataclasses import MISSING -from typing import Literal - -from isaaclab.utils import configclass - -from .patterns import PinholeCameraPatternCfg -from .ray_caster_camera import RayCasterCamera -from .ray_caster_cfg import RayCasterCfg - - -@configclass -class RayCasterCameraCfg(RayCasterCfg): - """Configuration for the ray-cast sensor.""" - - @configclass - class OffsetCfg: - """The offset pose of the sensor's frame from the sensor's parent frame.""" - - pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" - - convention: Literal["opengl", "ros", "world"] = "ros" - """The convention in which the frame offset is applied. Defaults to "ros". - - - ``"opengl"`` - forward axis: ``-Z`` - up axis: ``+Y`` - Offset is applied in the OpenGL (Usd.Camera) - convention. - - ``"ros"`` - forward axis: ``+Z`` - up axis: ``-Y`` - Offset is applied in the ROS convention. - - ``"world"`` - forward axis: ``+X`` - up axis: ``+Z`` - Offset is applied in the World Frame convention. - - """ - - class_type: type = RayCasterCamera - - offset: OffsetCfg = OffsetCfg() - """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" - - data_types: list[str] = ["distance_to_image_plane"] - """List of sensor names/types to enable for the camera. Defaults to ["distance_to_image_plane"].""" - - depth_clipping_behavior: Literal["max", "zero", "none"] = "none" - """Clipping behavior for the camera for values exceed the maximum value. Defaults to "none". - - - ``"max"``: Values are clipped to the maximum value. - - ``"zero"``: Values are clipped to zero. - - ``"none``: No clipping is applied. Values will be returned as ``inf`` for ``distance_to_camera`` and ``nan`` - for ``distance_to_image_plane`` data type. - """ - - pattern_cfg: PinholeCameraPatternCfg = MISSING - """The pattern that defines the local ray starting positions and directions in a pinhole camera pattern.""" - - def __post_init__(self): - # for cameras, this quantity should be False always. 
- self.ray_alignment = "base" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py deleted file mode 100644 index dbdebfad3a5..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_cfg.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Configuration for the ray-cast sensor.""" - -from dataclasses import MISSING -from typing import Literal - -from isaaclab.markers import VisualizationMarkersCfg -from isaaclab.markers.config import RAY_CASTER_MARKER_CFG -from isaaclab.utils import configclass - -from ..sensor_base_cfg import SensorBaseCfg -from .patterns.patterns_cfg import PatternBaseCfg -from .ray_caster import RayCaster - - -@configclass -class RayCasterCfg(SensorBaseCfg): - """Configuration for the ray-cast sensor.""" - - @configclass - class OffsetCfg: - """The offset pose of the sensor's frame from the sensor's parent frame.""" - - pos: tuple[float, float, float] = (0.0, 0.0, 0.0) - """Translation w.r.t. the parent frame. Defaults to (0.0, 0.0, 0.0).""" - rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) - """Quaternion rotation (w, x, y, z) w.r.t. the parent frame. Defaults to (1.0, 0.0, 0.0, 0.0).""" - - class_type: type = RayCaster - - mesh_prim_paths: list[str] = MISSING - """The list of mesh primitive paths to ray cast against. - - Note: - Currently, only a single static mesh is supported. We are working on supporting multiple - static meshes and dynamic meshes. - """ - - offset: OffsetCfg = OffsetCfg() - """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity.""" - - attach_yaw_only: bool | None = None - """Whether the rays' starting positions and directions only track the yaw orientation. - Defaults to None, which doesn't raise a warning of deprecated usage. - - This is useful for ray-casting height maps, where only yaw rotation is needed. - - .. deprecated:: 2.1.1 - - This attribute is deprecated and will be removed in the future. Please use - :attr:`ray_alignment` instead. - - To get the same behavior as setting this parameter to ``True`` or ``False``, set - :attr:`ray_alignment` to ``"yaw"`` or "base" respectively. - - """ - - ray_alignment: Literal["base", "yaw", "world"] = "base" - """Specify in what frame the rays are projected onto the ground. Default is "base". - - The options are: - - * ``base`` if the rays' starting positions and directions track the full root position and orientation. - * ``yaw`` if the rays' starting positions and directions track root position and only yaw component of - the orientation. This is useful for ray-casting height maps. - * ``world`` if rays' starting positions and directions are always fixed. This is useful in combination - with a mapping package on the robot and querying ray-casts in a global frame. - """ - - pattern_cfg: PatternBaseCfg = MISSING - """The pattern that defines the local ray starting positions and directions.""" - - max_distance: float = 1e6 - """Maximum distance (in meters) from the sensor to ray cast to. Defaults to 1e6.""" - - drift_range: tuple[float, float] = (0.0, 0.0) - """The range of drift (in meters) to add to the ray starting positions (xyz) in world frame. Defaults to (0.0, 0.0). 
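# On reset, the ray-caster draws a fresh drift offset per environment: `drift_range` is a
# single (min, max) interval applied to the sensor position, while `ray_cast_drift_range`
# gives a separate (min, max) per axis for the projected ray points. A minimal sketch of the
# per-axis sampling with plain torch (hypothetical helper, not the library code):

import torch

def sample_axis_drift(ranges: dict[str, tuple[float, float]], num_envs: int) -> torch.Tensor:
    lohi = torch.tensor([ranges.get(k, (0.0, 0.0)) for k in ("x", "y", "z")])  # shape (3, 2)
    return lohi[:, 0] + (lohi[:, 1] - lohi[:, 0]) * torch.rand(num_envs, 3)

# drift = sample_axis_drift({"z": (-0.02, 0.02)}, num_envs=4)  # only the z-axis drifts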
- - For floating base robots, this is useful for simulating drift in the robot's pose estimation. - """ - - ray_cast_drift_range: dict[str, tuple[float, float]] = {"x": (0.0, 0.0), "y": (0.0, 0.0), "z": (0.0, 0.0)} - """The range of drift (in meters) to add to the projected ray points in local projection frame. Defaults to - a dictionary with zero drift for each x, y and z axis. - - For floating base robots, this is useful for simulating drift in the robot's pose estimation. - """ - - visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") - """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. - - Note: - This attribute is only used when debug visualization is enabled. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py deleted file mode 100644 index d63e085e752..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/ray_caster/ray_caster_data.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from dataclasses import dataclass - -import torch - - -@dataclass -class RayCasterData: - """Data container for the ray-cast sensor.""" - - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where N is the number of sensors. - """ - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. - - Shape is (N, 4), where N is the number of sensors. - """ - ray_hits_w: torch.Tensor = None - """The ray hit positions in the world frame. - - Shape is (N, B, 3), where N is the number of sensors, B is the number of rays - in the scan pattern per sensor. - """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py deleted file mode 100644 index 4ece160bbe5..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Base class for sensors. - -This class defines an interface for sensors similar to how the :class:`isaaclab.assets.AssetBase` class works. -Each sensor class should inherit from this class and implement the abstract methods. -""" - -from __future__ import annotations - -import builtins -import inspect -import re -import weakref -from abc import ABC, abstractmethod -from collections.abc import Sequence -from typing import TYPE_CHECKING, Any - -import torch - -import omni.kit.app -import omni.timeline -from isaacsim.core.simulation_manager import IsaacEvents, SimulationManager - -import isaaclab.sim as sim_utils -from isaaclab.sim.utils.stage import get_current_stage - -if TYPE_CHECKING: - from .sensor_base_cfg import SensorBaseCfg - - -class SensorBase(ABC): - """The base class for implementing a sensor. - - The implementation is based on lazy evaluation. The sensor data is only updated when the user - tries accessing the data through the :attr:`data` property or sets ``force_compute=True`` in - the :meth:`update` method. 
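# The lazy-evaluation scheme described above boils down to two pieces of per-environment
# state: an accumulated timestamp and an "outdated" mask. `update(dt)` only advances the
# timestamps and marks environments whose update period has elapsed; the expensive buffer
# refresh happens when the data is finally read. A condensed sketch with plain torch
# (class and method names are illustrative only):

import torch

class LazySensorSketch:
    def __init__(self, num_envs: int, update_period: float):
        self.update_period = update_period
        self.timestamp = torch.zeros(num_envs)
        self.timestamp_last = torch.zeros(num_envs)
        self.outdated = torch.ones(num_envs, dtype=torch.bool)

    def update(self, dt: float):
        self.timestamp += dt
        self.outdated |= self.timestamp - self.timestamp_last + 1e-6 >= self.update_period

    def data(self, refresh_fn):
        ids = self.outdated.nonzero().squeeze(-1)
        if len(ids) > 0:
            refresh_fn(ids)  # recompute only the stale environments
            self.timestamp_last[ids] = self.timestamp[ids]
            self.outdated[ids] = False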
This is done to avoid unnecessary computation when the sensor data - is not used. - - The sensor is updated at the specified update period. If the update period is zero, then the - sensor is updated at every simulation step. - """ - - def __init__(self, cfg: SensorBaseCfg): - """Initialize the sensor class. - - Args: - cfg: The configuration parameters for the sensor. - """ - # check that config is valid - if cfg.history_length < 0: - raise ValueError(f"History length must be greater than 0! Received: {cfg.history_length}") - # check that the config is valid - cfg.validate() - # store inputs - self.cfg = cfg.copy() - # flag for whether the sensor is initialized - self._is_initialized = False - # flag for whether the sensor is in visualization mode - self._is_visualizing = False - # get stage handle - self.stage = get_current_stage() - - # register various callback functions - self._register_callbacks() - - # add handle for debug visualization (this is set to a valid handle inside set_debug_vis) - self._debug_vis_handle = None - # set initial state of debug visualization - self.set_debug_vis(self.cfg.debug_vis) - - def __del__(self): - """Unsubscribe from the callbacks.""" - # clear physics events handles - self._clear_callbacks() - - """ - Properties - """ - - @property - def is_initialized(self) -> bool: - """Whether the sensor is initialized. - - Returns True if the sensor is initialized, False otherwise. - """ - return self._is_initialized - - @property - def num_instances(self) -> int: - """Number of instances of the sensor. - - This is equal to the number of sensors per environment multiplied by the number of environments. - """ - return self._num_envs - - @property - def device(self) -> str: - """Memory device for computation.""" - return self._device - - @property - @abstractmethod - def data(self) -> Any: - """Data from the sensor. - - This property is only updated when the user tries to access the data. This is done to avoid - unnecessary computation when the sensor data is not used. - - For updating the sensor when this property is accessed, you can use the following - code snippet in your sensor implementation: - - .. code-block:: python - - # update sensors if needed - self._update_outdated_buffers() - # return the data (where `_data` is the data for the sensor) - return self._data - """ - raise NotImplementedError - - @property - def has_debug_vis_implementation(self) -> bool: - """Whether the sensor has a debug visualization implemented.""" - # check if function raises NotImplementedError - source_code = inspect.getsource(self._set_debug_vis_impl) - return "NotImplementedError" not in source_code - - """ - Operations - """ - - def set_debug_vis(self, debug_vis: bool) -> bool: - """Sets whether to visualize the sensor data. - - Args: - debug_vis: Whether to visualize the sensor data. - - Returns: - Whether the debug visualization was successfully set. False if the sensor - does not support debug visualization. 
- """ - # check if debug visualization is supported - if not self.has_debug_vis_implementation: - return False - # toggle debug visualization objects - self._set_debug_vis_impl(debug_vis) - # toggle debug visualization flag - self._is_visualizing = debug_vis - # toggle debug visualization handles - if debug_vis: - # create a subscriber for the post update event if it doesn't exist - if self._debug_vis_handle is None: - app_interface = omni.kit.app.get_app_interface() - self._debug_vis_handle = app_interface.get_post_update_event_stream().create_subscription_to_pop( - lambda event, obj=weakref.proxy(self): obj._debug_vis_callback(event) - ) - else: - # remove the subscriber if it exists - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None - # return success - return True - - def reset(self, env_ids: Sequence[int] | None = None): - """Resets the sensor internals. - - Args: - env_ids: The sensor ids to reset. Defaults to None. - """ - # Resolve sensor ids - if env_ids is None: - env_ids = slice(None) - # Reset the timestamp for the sensors - self._timestamp[env_ids] = 0.0 - self._timestamp_last_update[env_ids] = 0.0 - # Set all reset sensors to outdated so that they are updated when data is called the next time. - self._is_outdated[env_ids] = True - - def update(self, dt: float, force_recompute: bool = False): - # Update the timestamp for the sensors - self._timestamp += dt - self._is_outdated |= self._timestamp - self._timestamp_last_update + 1e-6 >= self.cfg.update_period - # Update the buffers - # TODO (from @mayank): Why is there a history length here when it doesn't mean anything in the sensor base?!? - # It is only for the contact sensor but there we should redefine the update function IMO. - if force_recompute or self._is_visualizing or (self.cfg.history_length > 0): - self._update_outdated_buffers() - - """ - Implementation specific. - """ - - @abstractmethod - def _initialize_impl(self): - """Initializes the sensor-related handles and internal buffers.""" - # Obtain Simulation Context - sim = sim_utils.SimulationContext.instance() - if sim is None: - raise RuntimeError("Simulation Context is not initialized!") - # Obtain device and backend - self._device = sim.device - self._backend = sim.backend - self._sim_physics_dt = sim.get_physics_dt() - # Count number of environments - env_prim_path_expr = self.cfg.prim_path.rsplit("/", 1)[0] - self._parent_prims = sim_utils.find_matching_prims(env_prim_path_expr) - self._num_envs = len(self._parent_prims) - # Boolean tensor indicating whether the sensor data has to be refreshed - self._is_outdated = torch.ones(self._num_envs, dtype=torch.bool, device=self._device) - # Current timestamp (in seconds) - self._timestamp = torch.zeros(self._num_envs, device=self._device) - # Timestamp from last update - self._timestamp_last_update = torch.zeros_like(self._timestamp) - - # Initialize debug visualization handle - if self._debug_vis_handle is None: - # set initial state of debug visualization - self.set_debug_vis(self.cfg.debug_vis) - - @abstractmethod - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the sensor data for provided environment ids. - - This function does not perform any time-based checks and directly fills the data into the - data container. - - Args: - env_ids: The indices of the sensors that are ready to capture. - """ - raise NotImplementedError - - def _set_debug_vis_impl(self, debug_vis: bool): - """Set debug visualization into visualization objects. 
- - This function is responsible for creating the visualization objects if they don't exist - and input ``debug_vis`` is True. If the visualization objects exist, the function should - set their visibility into the stage. - """ - raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") - - def _debug_vis_callback(self, event): - """Callback for debug visualization. - - This function calls the visualization objects and sets the data to visualize into them. - """ - raise NotImplementedError(f"Debug visualization is not implemented for {self.__class__.__name__}.") - - """ - Internal simulation callbacks. - """ - - def _register_callbacks(self): - """Registers the timeline and prim deletion callbacks.""" - - # register simulator callbacks (with weakref safety to avoid crashes on deletion) - def safe_callback(callback_name, event, obj_ref): - """Safely invoke a callback on a weakly-referenced object, ignoring ReferenceError if deleted.""" - try: - obj = obj_ref - getattr(obj, callback_name)(event) - except ReferenceError: - # Object has been deleted; ignore. - pass - - # note: use weakref on callbacks to ensure that this object can be deleted when its destructor is called. - # add callbacks for stage play/stop - obj_ref = weakref.proxy(self) - timeline_event_stream = omni.timeline.get_timeline_interface().get_timeline_event_stream() - - # the order is set to 10 which is arbitrary but should be lower priority than the default order of 0 - # register timeline PLAY event callback (lower priority with order=10) - self._initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.PLAY), - lambda event, obj_ref=obj_ref: safe_callback("_initialize_callback", event, obj_ref), - order=10, - ) - # register timeline STOP event callback (lower priority with order=10) - self._invalidate_initialize_handle = timeline_event_stream.create_subscription_to_pop_by_type( - int(omni.timeline.TimelineEventType.STOP), - lambda event, obj_ref=obj_ref: safe_callback("_invalidate_initialize_callback", event, obj_ref), - order=10, - ) - # register prim deletion callback - self._prim_deletion_callback_id = SimulationManager.register_callback( - lambda event, obj_ref=obj_ref: safe_callback("_on_prim_deletion", event, obj_ref), - event=IsaacEvents.PRIM_DELETION, - ) - - def _initialize_callback(self, event): - """Initializes the scene elements. - - Note: - PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be - called whenever the simulator "plays" from a "stop" state. - """ - if not self._is_initialized: - try: - self._initialize_impl() - except Exception as e: - if builtins.ISAACLAB_CALLBACK_EXCEPTION is None: - builtins.ISAACLAB_CALLBACK_EXCEPTION = e - self._is_initialized = True - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - self._is_initialized = False - if self._debug_vis_handle is not None: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None - - def _on_prim_deletion(self, prim_path: str) -> None: - """Invalidates and deletes the callbacks when the prim is deleted. - - Args: - prim_path: The path to the prim that is being deleted. - - Note: - This function is called when the prim is deleted. 
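# The callback registration above wraps `self` in a `weakref.proxy` so that a pending
# timeline subscription does not keep a deleted sensor alive, and it swallows the
# ReferenceError raised if an event fires after the sensor has been garbage-collected.
# The pattern in isolation (hypothetical helper name):

import weakref

def make_safe_callback(obj, method_name: str):
    proxy = weakref.proxy(obj)

    def _callback(event):
        try:
            getattr(proxy, method_name)(event)
        except ReferenceError:
            pass  # the owning object was deleted; ignore the stale event

    return _callback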
- """ - if prim_path == "/": - self._clear_callbacks() - return - result = re.match( - pattern="^" + "/".join(self.cfg.prim_path.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path - ) - if result: - self._clear_callbacks() - - def _clear_callbacks(self) -> None: - """Clears the callbacks.""" - if self._prim_deletion_callback_id: - SimulationManager.deregister_callback(self._prim_deletion_callback_id) - self._prim_deletion_callback_id = None - if self._initialize_handle: - self._initialize_handle.unsubscribe() - self._initialize_handle = None - if self._invalidate_initialize_handle: - self._invalidate_initialize_handle.unsubscribe() - self._invalidate_initialize_handle = None - # clear debug visualization - if self._debug_vis_handle: - self._debug_vis_handle.unsubscribe() - self._debug_vis_handle = None - - """ - Helper functions. - """ - - def _update_outdated_buffers(self): - """Fills the sensor data for the outdated sensors.""" - outdated_env_ids = self._is_outdated.nonzero().squeeze(-1) - if len(outdated_env_ids) > 0: - # obtain new data - self._update_buffers_impl(outdated_env_ids) - # update the timestamp from last update - self._timestamp_last_update[outdated_env_ids] = self._timestamp[outdated_env_ids] - # set outdated flag to false for the updated sensors - self._is_outdated[outdated_env_ids] = False diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py b/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py deleted file mode 100644 index 85875b2e499..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/sensor_base_cfg.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from dataclasses import MISSING - -from isaaclab.utils import configclass - -from .sensor_base import SensorBase - - -@configclass -class SensorBaseCfg: - """Configuration parameters for a sensor.""" - - class_type: type[SensorBase] = MISSING - """The associated sensor class. - - The class should inherit from :class:`isaaclab.sensors.sensor_base.SensorBase`. - """ - - prim_path: str = MISSING - """Prim path (or expression) to the sensor. - - .. note:: - The expression can contain the environment namespace regex ``{ENV_REGEX_NS}`` which - will be replaced with the environment namespace. - - Example: ``{ENV_REGEX_NS}/Robot/sensor`` will be replaced with ``/World/envs/env_.*/Robot/sensor``. - - """ - - update_period: float = 0.0 - """Update period of the sensor buffers (in seconds). Defaults to 0.0 (update every step).""" - - history_length: int = 0 - """Number of past frames to store in the sensor buffers. Defaults to 0, which means that only - the current data is stored (no history).""" - - debug_vis: bool = False - """Whether to visualize the sensor. Defaults to False.""" diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py deleted file mode 100644 index 869b233d166..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/sensors/tacsl_sensor/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -"""TacSL Tactile Sensor implementation for IsaacLab.""" - -from .visuotactile_sensor import VisuoTactileSensor -from .visuotactile_sensor_cfg import GelSightRenderCfg, VisuoTactileSensorCfg -from .visuotactile_sensor_data import VisuoTactileSensorData From 6050eef61bd6b2bfc9d67d92b1a702721c7ff605 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 27 Jan 2026 16:29:19 +0100 Subject: [PATCH 03/38] removing old files. --- .../articulation/articulation_data_old.py | 1165 --------- .../assets/articulation/articulation_old.py | 2143 ----------------- .../rigid_object/rigid_object_data_old.py | 657 ----- .../assets/rigid_object/rigid_object_old.py | 582 ----- .../rigid_object_collection_data_old.py | 515 ---- .../rigid_object_collection_old.py | 783 ------ 6 files changed, 5845 deletions(-) delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py delete mode 100644 source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py deleted file mode 100644 index 47902edc0a5..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data_old.py +++ /dev/null @@ -1,1165 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -import logging -import weakref - -import torch - -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager - -import isaaclab.utils.math as math_utils -from isaaclab.utils.buffers import TimestampedBuffer - -# import logger -logger = logging.getLogger(__name__) - - -class ArticulationData: - """Data container for an articulation. - - This class contains the data for an articulation in the simulation. The data includes the state of - the root rigid body, the state of all the bodies in the articulation, and the joint state. The data is - stored in the simulation world frame unless otherwise specified. - - An articulation is comprised of multiple rigid bodies or links. For a rigid body, there are two frames - of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings, the two frames may not coincide with each other. In the robotics sense, the actor frame - can be interpreted as the link frame. - """ - - def __init__(self, root_physx_view: physx.ArticulationView, device: str): - """Initializes the articulation data. - - Args: - root_physx_view: The root articulation view. - device: The device used for processing. 
- """ - # Set the parameters - self.device = device - # Set the root articulation view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. - self._root_physx_view: physx.ArticulationView = weakref.proxy(root_physx_view) - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - gravity = self._physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) - - # Initialize history for finite differencing - self._previous_joint_vel = self._root_physx_view.get_dof_velocities().clone() - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._root_link_pose_w = TimestampedBuffer() - self._root_link_vel_w = TimestampedBuffer() - self._body_link_pose_w = TimestampedBuffer() - self._body_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._body_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._root_com_pose_w = TimestampedBuffer() - self._root_com_vel_w = TimestampedBuffer() - self._body_com_pose_w = TimestampedBuffer() - self._body_com_vel_w = TimestampedBuffer() - self._body_com_acc_w = TimestampedBuffer() - # -- combined state (these are cached as they concatenate) - self._root_state_w = TimestampedBuffer() - self._root_link_state_w = TimestampedBuffer() - self._root_com_state_w = TimestampedBuffer() - self._body_state_w = TimestampedBuffer() - self._body_link_state_w = TimestampedBuffer() - self._body_com_state_w = TimestampedBuffer() - # -- joint state - self._joint_pos = TimestampedBuffer() - self._joint_vel = TimestampedBuffer() - self._joint_acc = TimestampedBuffer() - self._body_incoming_joint_wrench_b = TimestampedBuffer() - - def update(self, dt: float): - # update the simulation timestamp - self._sim_timestamp += dt - # Trigger an update of the joint acceleration buffer at a higher frequency - # since we do finite differencing. - self.joint_acc - - ## - # Names. - ## - - body_names: list[str] = None - """Body names in the order parsed by the simulation view.""" - - joint_names: list[str] = None - """Joint names in the order parsed by the simulation view.""" - - fixed_tendon_names: list[str] = None - """Fixed tendon names in the order parsed by the simulation view.""" - - spatial_tendon_names: list[str] = None - """Spatial tendon names in the order parsed by the simulation view.""" - - ## - # Defaults - Initial state. - ## - - default_root_state: torch.Tensor = None - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. - Shape is (num_instances, 13). - - The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular - velocities are of its center of mass frame. - - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - default_joint_pos: torch.Tensor = None - """Default joint positions of all joints. Shape is (num_instances, num_joints). 
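The buffers created above implement a simple read-on-demand cache: every state property compares its buffer timestamp against the simulation timestamp and queries PhysX only when the cached value is stale. A self-contained sketch of the same pattern (class and attribute names are illustrative, not the actual Isaac Lab implementation):

.. code-block:: python

    import torch


    class LazyTimestampedValue:
        """Minimal stand-in for the TimestampedBuffer-based lazy properties."""

        def __init__(self, read_fn):
            self._read_fn = read_fn    # callable that queries the physics view
            self._sim_timestamp = 0.0  # advanced by update(dt)
            self._data = None          # cached tensor
            self._timestamp = -1.0     # time at which the cache was last filled

        def update(self, dt: float):
            # advancing simulation time implicitly invalidates the cache
            self._sim_timestamp += dt

        @property
        def value(self) -> torch.Tensor:
            if self._timestamp < self._sim_timestamp:
                self._data = self._read_fn()           # refresh only when stale
                self._timestamp = self._sim_timestamp
            return self._data


    state = LazyTimestampedValue(lambda: torch.zeros(4, 7))
    state.update(0.01)
    pose = state.value  # first access after update() triggers a read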
- - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - default_joint_vel: torch.Tensor = None - """Default joint velocities of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. - """ - - ## - # Defaults - Physical properties. - ## - - default_mass: torch.Tensor = None - """Default mass for all the bodies in the articulation. Shape is (num_instances, num_bodies). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_inertia: torch.Tensor = None - """Default inertia for all the bodies in the articulation. Shape is (num_instances, num_bodies, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the articulation links' - actor frame. The values are stored in the order - :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. However, due to the - symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_joint_stiffness: torch.Tensor = None - """Default joint stiffness of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.stiffness` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - .. attention:: - The default stiffness is the value configured by the user or the value parsed from the USD schema. - It should not be confused with :attr:`joint_stiffness`, which is the value set into the simulation. - """ - - default_joint_damping: torch.Tensor = None - """Default joint damping of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.damping` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - .. attention:: - The default stiffness is the value configured by the user or the value parsed from the USD schema. - It should not be confused with :attr:`joint_damping`, which is the value set into the simulation. - """ - - default_joint_armature: torch.Tensor = None - """Default joint armature of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.armature` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - """ - - default_joint_friction_coeff: torch.Tensor = None - """Default joint static friction coefficient of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's :attr:`isaaclab.actuators.ActuatorBaseCfg.friction` - parameter. If the parameter's value is None, the value parsed from the USD schema, at the time of initialization, - is used. - - Note: - In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - default_joint_dynamic_friction_coeff: torch.Tensor = None - """Default joint dynamic friction coefficient of all joints. Shape is (num_instances, num_joints). 
- - This quantity is configured through the actuator model's - :attr:`isaaclab.actuators.ActuatorBaseCfg.dynamic_friction` parameter. If the parameter's value is None, - the value parsed from the USD schema, at the time of initialization, is used. - - Note: - In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - default_joint_viscous_friction_coeff: torch.Tensor = None - """Default joint viscous friction coefficient of all joints. Shape is (num_instances, num_joints). - - This quantity is configured through the actuator model's - :attr:`isaaclab.actuators.ActuatorBaseCfg.viscous_friction` parameter. If the parameter's value is None, - the value parsed from the USD schema, at the time of initialization, is used. - """ - - default_joint_pos_limits: torch.Tensor = None - """Default joint position limits of all joints. Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the - time of initialization. - """ - - default_fixed_tendon_stiffness: torch.Tensor = None - """Default tendon stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_damping: torch.Tensor = None - """Default tendon damping of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_limit_stiffness: torch.Tensor = None - """Default tendon limit stiffness of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_rest_length: torch.Tensor = None - """Default tendon rest length of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_offset: torch.Tensor = None - """Default tendon offset of all fixed tendons. Shape is (num_instances, num_fixed_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_fixed_tendon_pos_limits: torch.Tensor = None - """Default tendon position limits of all fixed tendons. Shape is (num_instances, num_fixed_tendons, 2). - - The position limits are in the order :math:`[lower, upper]`. They are parsed from the USD schema at the time of - initialization. - """ - - default_spatial_tendon_stiffness: torch.Tensor = None - """Default tendon stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_damping: torch.Tensor = None - """Default tendon damping of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_limit_stiffness: torch.Tensor = None - """Default tendon limit stiffness of all spatial tendons. Shape is (num_instances, num_spatial_tendons). - - This quantity is parsed from the USD schema at the time of initialization. - """ - - default_spatial_tendon_offset: torch.Tensor = None - """Default tendon offset of all spatial tendons. Shape is (num_instances, num_spatial_tendons). 
- - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Joint commands -- Set into simulation. - ## - - joint_pos_target: torch.Tensor = None - """Joint position targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - joint_vel_target: torch.Tensor = None - """Joint velocity targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - joint_effort_target: torch.Tensor = None - """Joint effort targets commanded by the user. Shape is (num_instances, num_joints). - - For an implicit actuator model, the targets are directly set into the simulation. - For an explicit actuator model, the targets are used to compute the joint torques (see :attr:`applied_torque`), - which are then set into the simulation. - """ - - ## - # Joint commands -- Explicit actuators. - ## - - computed_torque: torch.Tensor = None - """Joint torques computed from the actuator model (before clipping). Shape is (num_instances, num_joints). - - This quantity is the raw torque output from the actuator mode, before any clipping is applied. - It is exposed for users who want to inspect the computations inside the actuator model. - For instance, to penalize the learning agent for a difference between the computed and applied torques. - """ - - applied_torque: torch.Tensor = None - """Joint torques applied from the actuator model (after clipping). Shape is (num_instances, num_joints). - - These torques are set into the simulation, after clipping the :attr:`computed_torque` based on the - actuator model. - """ - - ## - # Joint properties. - ## - - joint_stiffness: torch.Tensor = None - """Joint stiffness provided to the simulation. Shape is (num_instances, num_joints). - - In the case of explicit actuators, the value for the corresponding joints is zero. - """ - - joint_damping: torch.Tensor = None - """Joint damping provided to the simulation. Shape is (num_instances, num_joints) - - In the case of explicit actuators, the value for the corresponding joints is zero. - """ - - joint_armature: torch.Tensor = None - """Joint armature provided to the simulation. Shape is (num_instances, num_joints).""" - - joint_friction_coeff: torch.Tensor = None - """Joint static friction coefficient provided to the simulation. Shape is (num_instances, num_joints). - - Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - joint_dynamic_friction_coeff: torch.Tensor = None - """Joint dynamic friction coefficient provided to the simulation. Shape is (num_instances, num_joints). - - Note: In Isaac Sim 4.5, this parameter is modeled as a coefficient. In Isaac Sim 5.0 and later, - it is modeled as an effort (torque or force). - """ - - joint_viscous_friction_coeff: torch.Tensor = None - """Joint viscous friction coefficient provided to the simulation. Shape is (num_instances, num_joints).""" - - joint_pos_limits: torch.Tensor = None - """Joint position limits provided to the simulation. 
Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`. - """ - - joint_vel_limits: torch.Tensor = None - """Joint maximum velocity provided to the simulation. Shape is (num_instances, num_joints).""" - - joint_effort_limits: torch.Tensor = None - """Joint maximum effort provided to the simulation. Shape is (num_instances, num_joints).""" - - ## - # Joint properties - Custom. - ## - - soft_joint_pos_limits: torch.Tensor = None - r"""Soft joint positions limits for all joints. Shape is (num_instances, num_joints, 2). - - The limits are in the order :math:`[lower, upper]`.The soft joint position limits are computed as - a sub-region of the :attr:`joint_pos_limits` based on the - :attr:`~isaaclab.assets.ArticulationCfg.soft_joint_pos_limit_factor` parameter. - - Consider the joint position limits :math:`[lower, upper]` and the soft joint position limits - :math:`[soft_lower, soft_upper]`. The soft joint position limits are computed as: - - .. math:: - - soft\_lower = (lower + upper) / 2 - factor * (upper - lower) / 2 - soft\_upper = (lower + upper) / 2 + factor * (upper - lower) / 2 - - The soft joint position limits help specify a safety region around the joint limits. It isn't used by the - simulation, but is useful for learning agents to prevent the joint positions from violating the limits. - """ - - soft_joint_vel_limits: torch.Tensor = None - """Soft joint velocity limits for all joints. Shape is (num_instances, num_joints). - - These are obtained from the actuator model. It may differ from :attr:`joint_vel_limits` if the actuator model - has a variable velocity limit model. For instance, in a variable gear ratio actuator model. - """ - - gear_ratio: torch.Tensor = None - """Gear ratio for relating motor torques to applied Joint torques. Shape is (num_instances, num_joints).""" - - ## - # Fixed tendon properties. - ## - - fixed_tendon_stiffness: torch.Tensor = None - """Fixed tendon stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_damping: torch.Tensor = None - """Fixed tendon damping provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_limit_stiffness: torch.Tensor = None - """Fixed tendon limit stiffness provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_rest_length: torch.Tensor = None - """Fixed tendon rest length provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_offset: torch.Tensor = None - """Fixed tendon offset provided to the simulation. Shape is (num_instances, num_fixed_tendons).""" - - fixed_tendon_pos_limits: torch.Tensor = None - """Fixed tendon position limits provided to the simulation. Shape is (num_instances, num_fixed_tendons, 2).""" - - ## - # Spatial tendon properties. - ## - - spatial_tendon_stiffness: torch.Tensor = None - """Spatial tendon stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_damping: torch.Tensor = None - """Spatial tendon damping provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_limit_stiffness: torch.Tensor = None - """Spatial tendon limit stiffness provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - spatial_tendon_offset: torch.Tensor = None - """Spatial tendon offset provided to the simulation. Shape is (num_instances, num_spatial_tendons).""" - - ## - # Root state properties. 
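As a standalone illustration of the soft-limit formula quoted above, the snippet below computes soft limits from a made-up hard-limit tensor and a factor of 0.9; neither value is read from a live articulation.

.. code-block:: python

    import torch

    # hard joint limits [lower, upper], shape (num_instances, num_joints, 2)
    joint_pos_limits = torch.tensor([[[-1.0, 1.0], [0.0, 3.14]]])
    factor = 0.9  # corresponds to ArticulationCfg.soft_joint_pos_limit_factor

    lower, upper = joint_pos_limits[..., 0], joint_pos_limits[..., 1]
    mid = 0.5 * (lower + upper)         # mid-point of each joint range
    half_range = 0.5 * (upper - lower)  # half-width of each joint range

    soft_joint_pos_limits = torch.stack(
        (mid - factor * half_range, mid + factor * half_range), dim=-1
    )
    # e.g. the [-1.0, 1.0] joint becomes [-0.9, 0.9]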
- ## - - @property - def root_link_pose_w(self) -> torch.Tensor: - """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the articulation root's actor frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_root_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._root_link_pose_w.data = pose - self._root_link_pose_w.timestamp = self._sim_timestamp - - return self._root_link_pose_w.data - - @property - def root_link_vel_w(self) -> torch.Tensor: - """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the articulation root's actor frame - relative to the world. - """ - if self._root_link_vel_w.timestamp < self._sim_timestamp: - # read the CoM velocity - vel = self.root_com_vel_w.clone() - # adjust linear velocity to link from center of mass - vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 - ) - # set the buffer data and timestamp - self._root_link_vel_w.data = vel - self._root_link_vel_w.timestamp = self._sim_timestamp - - return self._root_link_vel_w.data - - @property - def root_com_pose_w(self) -> torch.Tensor: - """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the articulation root's center of mass frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] - ) - # set the buffer data and timestamp - self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._root_com_pose_w.timestamp = self._sim_timestamp - - return self._root_com_pose_w.data - - @property - def root_com_vel_w(self) -> torch.Tensor: - """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the articulation root's center of mass frame - relative to the world. - """ - if self._root_com_vel_w.timestamp < self._sim_timestamp: - self._root_com_vel_w.data = self._root_physx_view.get_root_velocities() - self._root_com_vel_w.timestamp = self._sim_timestamp - - return self._root_com_vel_w.data - - @property - def root_state_w(self): - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position and quaternion are of the articulation root's actor frame relative to the world. Meanwhile, - the linear and angular velocities are of the articulation root's center of mass frame. - """ - if self._root_state_w.timestamp < self._sim_timestamp: - self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) - self._root_state_w.timestamp = self._sim_timestamp - - return self._root_state_w.data - - @property - def root_link_state_w(self): - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). 
- - The position, quaternion, and linear/angular velocity are of the articulation root's actor frame relative to the - world. - """ - if self._root_link_state_w.timestamp < self._sim_timestamp: - self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) - self._root_link_state_w.timestamp = self._sim_timestamp - - return self._root_link_state_w.data - - @property - def root_com_state_w(self): - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame - relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the - orientation of the principle inertia. - """ - if self._root_com_state_w.timestamp < self._sim_timestamp: - self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) - self._root_com_state_w.timestamp = self._sim_timestamp - - return self._root_com_state_w.data - - ## - # Body state properties. - ## - - @property - def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_bodies, 7). - - This quantity is the pose of the articulation links' actor frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_link_pose_w.timestamp < self._sim_timestamp: - # perform forward kinematics (shouldn't cause overhead if it happened already) - self._physics_sim_view.update_articulations_kinematic() - # read data from simulation - poses = self._root_physx_view.get_link_transforms().clone() - poses[..., 3:7] = math_utils.convert_quat(poses[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_link_pose_w.data = poses - self._body_link_pose_w.timestamp = self._sim_timestamp - - return self._body_link_pose_w.data - - @property - def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 6). - - This quantity contains the linear and angular velocities of the articulation links' actor frame - relative to the world. - """ - if self._body_link_vel_w.timestamp < self._sim_timestamp: - # read data from simulation - velocities = self.body_com_vel_w.clone() - # adjust linear velocity to link from center of mass - velocities[..., :3] += torch.linalg.cross( - velocities[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 - ) - # set the buffer data and timestamp - self._body_link_vel_w.data = velocities - self._body_link_vel_w.timestamp = self._sim_timestamp - - return self._body_link_vel_w.data - - @property - def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_bodies, 7). - - This quantity is the pose of the center of mass frame of the articulation links relative to the world. - The orientation is provided in (w, x, y, z) format. 
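The link-frame velocities above are obtained from the center-of-mass velocities with the standard rigid-body velocity-transfer rule :math:`v_{link} = v_{com} + \omega \times r`, where :math:`r` points from the center of mass to the link origin expressed in the world frame (in the code, ``quat_apply(link_quat_w, -com_pos_b)``). A standalone numeric sketch with made-up values:

.. code-block:: python

    import torch

    # world-frame velocity of one body's center of mass
    com_lin_vel_w = torch.tensor([1.0, 0.0, 0.0])
    com_ang_vel_w = torch.tensor([0.0, 0.0, 2.0])
    # vector from the center of mass to the link origin, already in world frame
    com_to_link_w = torch.tensor([0.0, 0.1, 0.0])

    link_lin_vel_w = com_lin_vel_w + torch.linalg.cross(com_ang_vel_w, com_to_link_w)
    link_ang_vel_w = com_ang_vel_w  # angular velocity is shared by all points of a rigid body
    # link_lin_vel_w -> tensor([0.8, 0.0, 0.0])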
- """ - if self._body_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b - ) - # set the buffer data and timestamp - self._body_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._body_com_pose_w.timestamp = self._sim_timestamp - - return self._body_com_pose_w.data - - @property - def body_com_vel_w(self) -> torch.Tensor: - """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 6). - - This quantity contains the linear and angular velocities of the articulation links' center of mass frame - relative to the world. - """ - if self._body_com_vel_w.timestamp < self._sim_timestamp: - self._body_com_vel_w.data = self._root_physx_view.get_link_velocities() - self._body_com_vel_w.timestamp = self._sim_timestamp - - return self._body_com_vel_w.data - - @property - def body_state_w(self): - """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position and quaternion are of all the articulation links' actor frame. Meanwhile, the linear and angular - velocities are of the articulation links's center of mass frame. - """ - if self._body_state_w.timestamp < self._sim_timestamp: - self._body_state_w.data = torch.cat((self.body_link_pose_w, self.body_com_vel_w), dim=-1) - self._body_state_w.timestamp = self._sim_timestamp - - return self._body_state_w.data - - @property - def body_link_state_w(self): - """State of all bodies' link frame`[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. - """ - if self._body_link_state_w.timestamp < self._sim_timestamp: - self._body_link_state_w.data = torch.cat((self.body_link_pose_w, self.body_link_vel_w), dim=-1) - self._body_link_state_w.timestamp = self._sim_timestamp - - return self._body_link_state_w.data - - @property - def body_com_state_w(self): - """State of all bodies center of mass `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the - world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the - principle inertia. - """ - if self._body_com_state_w.timestamp < self._sim_timestamp: - self._body_com_state_w.data = torch.cat((self.body_com_pose_w, self.body_com_vel_w), dim=-1) - self._body_com_state_w.timestamp = self._sim_timestamp - - return self._body_com_state_w.data - - @property - def body_com_acc_w(self): - """Acceleration of all bodies center of mass ``[lin_acc, ang_acc]``. - Shape is (num_instances, num_bodies, 6). - - All values are relative to the world. - """ - if self._body_com_acc_w.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._body_com_acc_w.data = self._root_physx_view.get_link_accelerations() - self._body_com_acc_w.timestamp = self._sim_timestamp - - return self._body_com_acc_w.data - - @property - def body_com_pose_b(self) -> torch.Tensor: - """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). 
- - This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_com_pose_b.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_coms().to(self.device) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_com_pose_b.data = pose - self._body_com_pose_b.timestamp = self._sim_timestamp - - return self._body_com_pose_b.data - - @property - def body_incoming_joint_wrench_b(self) -> torch.Tensor: - """Joint reaction wrench applied from body parent to child body in parent body frame. - - Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the - world of an articulation. - - For more information on joint wrenches, please check the`PhysX documentation`_ and the underlying - `PhysX Tensor API`_. - - .. _`PhysX documentation`: https://nvidia-omniverse.github.io/PhysX/physx/5.5.1/docs/Articulations.html#link-incoming-joint-force - .. _`PhysX Tensor API`: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.get_link_incoming_joint_force - """ - - if self._body_incoming_joint_wrench_b.timestamp < self._sim_timestamp: - self._body_incoming_joint_wrench_b.data = self._root_physx_view.get_link_incoming_joint_force() - self._body_incoming_joint_wrench_b.time_stamp = self._sim_timestamp - return self._body_incoming_joint_wrench_b.data - - ## - # Joint state properties. - ## - - @property - def joint_pos(self): - """Joint positions of all joints. Shape is (num_instances, num_joints).""" - if self._joint_pos.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._joint_pos.data = self._root_physx_view.get_dof_positions() - self._joint_pos.timestamp = self._sim_timestamp - return self._joint_pos.data - - @property - def joint_vel(self): - """Joint velocities of all joints. Shape is (num_instances, num_joints).""" - if self._joint_vel.timestamp < self._sim_timestamp: - # read data from simulation and set the buffer data and timestamp - self._joint_vel.data = self._root_physx_view.get_dof_velocities() - self._joint_vel.timestamp = self._sim_timestamp - return self._joint_vel.data - - @property - def joint_acc(self): - """Joint acceleration of all joints. Shape is (num_instances, num_joints).""" - if self._joint_acc.timestamp < self._sim_timestamp: - # note: we use finite differencing to compute acceleration - time_elapsed = self._sim_timestamp - self._joint_acc.timestamp - self._joint_acc.data = (self.joint_vel - self._previous_joint_vel) / time_elapsed - self._joint_acc.timestamp = self._sim_timestamp - # update the previous joint velocity - self._previous_joint_vel[:] = self.joint_vel - return self._joint_acc.data - - ## - # Derived Properties. - ## - - @property - def projected_gravity_b(self): - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self): - """Yaw heading of the base frame (in radians). Shape is (num_instances,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. 
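The ``joint_acc`` property above is not read from the solver; it is estimated by finite differencing of consecutive velocity reads, as a quick arithmetic check illustrates (all numbers are made up):

.. code-block:: python

    import torch

    # joint velocities at the previous and current acceleration reads
    prev_joint_vel = torch.tensor([[0.0, 1.0]])
    curr_joint_vel = torch.tensor([[0.5, 1.0]])
    time_elapsed = 0.02  # seconds between the two reads

    joint_acc = (curr_joint_vel - prev_joint_vel) / time_elapsed
    # -> tensor([[25.,  0.]]) rad/s^2 for the two joints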
- """ - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[:, 1], forward_w[:, 0]) - - @property - def root_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the articulation root's actor frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) - - @property - def root_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the articulation root's actor frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) - - @property - def root_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the articulation root's center of mass frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) - - @property - def root_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the articulation root's center of mass frame with respect to the - its actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def root_link_pos_w(self) -> torch.Tensor: - """Root link position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_pose_w[:, :3] - - @property - def root_link_quat_w(self) -> torch.Tensor: - """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body. - """ - return self.root_link_pose_w[:, 3:7] - - @property - def root_link_lin_vel_w(self) -> torch.Tensor: - """Root linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's actor frame relative to the world. - """ - return self.root_link_vel_w[:, :3] - - @property - def root_link_ang_vel_w(self) -> torch.Tensor: - """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_vel_w[:, 3:6] - - @property - def root_com_pos_w(self) -> torch.Tensor: - """Root center of mass position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, :3] - - @property - def root_com_quat_w(self) -> torch.Tensor: - """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, 3:7] - - @property - def root_com_lin_vel_w(self) -> torch.Tensor: - """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). 
- - This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, :3] - - @property - def root_com_ang_vel_w(self) -> torch.Tensor: - """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, 3:6] - - @property - def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the position of the articulation bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., :3] - - @property - def body_link_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the articulation bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., 3:7] - - @property - def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear velocity of the articulation bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., :3] - - @property - def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular velocity of the articulation bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., 3:6] - - @property - def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the position of the articulation bodies' actor frame. - """ - return self.body_com_pose_w[..., :3] - - @property - def body_com_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. - Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the articulation bodies' actor frame. - """ - return self.body_com_pose_w[..., 3:7] - - @property - def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear velocity of the articulation bodies' center of mass frame. - """ - return self.body_com_vel_w[..., :3] - - @property - def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular velocity of the articulation bodies' center of mass frame. - """ - return self.body_com_vel_w[..., 3:6] - - @property - def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the linear acceleration of the articulation bodies' center of mass frame. - """ - return self.body_com_acc_w[..., :3] - - @property - def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). - - This quantity is the angular acceleration of the articulation bodies' center of mass frame. 
- """ - return self.body_com_acc_w[..., 3:6] - - @property - def body_com_pos_b(self) -> torch.Tensor: - """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, num_bodies, 3). - - This quantity is the center of mass location relative to its body'slink frame. - """ - return self.body_com_pose_b[..., :3] - - @property - def body_com_quat_b(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, num_bodies, 4). - - This quantity is the orientation of the principles axes of inertia relative to its body's link frame. - """ - return self.body_com_pose_b[..., 3:7] - - ## - # Backward compatibility. - ## - - @property - def root_pose_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pose_w`.""" - return self.root_link_pose_w - - @property - def root_pos_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pos_w`.""" - return self.root_link_pos_w - - @property - def root_quat_w(self) -> torch.Tensor: - """Same as :attr:`root_link_quat_w`.""" - return self.root_link_quat_w - - @property - def root_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_vel_w`.""" - return self.root_com_vel_w - - @property - def root_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_w`.""" - return self.root_com_lin_vel_w - - @property - def root_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_w`.""" - return self.root_com_ang_vel_w - - @property - def root_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_b`.""" - return self.root_com_lin_vel_b - - @property - def root_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_b`.""" - return self.root_com_ang_vel_b - - @property - def body_pose_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pose_w`.""" - return self.body_link_pose_w - - @property - def body_pos_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pos_w`.""" - return self.body_link_pos_w - - @property - def body_quat_w(self) -> torch.Tensor: - """Same as :attr:`body_link_quat_w`.""" - return self.body_link_quat_w - - @property - def body_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_vel_w`.""" - return self.body_com_vel_w - - @property - def body_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_vel_w`.""" - return self.body_com_lin_vel_w - - @property - def body_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_vel_w`.""" - return self.body_com_ang_vel_w - - @property - def body_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_acc_w`.""" - return self.body_com_acc_w - - @property - def body_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_acc_w`.""" - return self.body_com_lin_acc_w - - @property - def body_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_acc_w`.""" - return self.body_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`body_com_pos_b`.""" - return self.body_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`body_com_quat_b`.""" - return self.body_com_quat_b - - @property - def joint_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" - logger.warning( - "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead." 
- ) - return self.joint_pos_limits - - @property - def default_joint_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" - logger.warning( - "The `default_joint_limits` property will be deprecated in a future release. Please use" - " `default_joint_pos_limits` instead." - ) - return self.default_joint_pos_limits - - @property - def joint_velocity_limits(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" - logger.warning( - "The `joint_velocity_limits` property will be deprecated in a future release. Please use" - " `joint_vel_limits` instead." - ) - return self.joint_vel_limits - - @property - def joint_friction(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`joint_friction_coeff` instead.""" - logger.warning( - "The `joint_friction` property will be deprecated in a future release. Please use" - " `joint_friction_coeff` instead." - ) - return self.joint_friction_coeff - - @property - def default_joint_friction(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" - logger.warning( - "The `default_joint_friction` property will be deprecated in a future release. Please use" - " `default_joint_friction_coeff` instead." - ) - return self.default_joint_friction_coeff - - @property - def fixed_tendon_limit(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" - logger.warning( - "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `fixed_tendon_pos_limits` instead." - ) - return self.fixed_tendon_pos_limits - - @property - def default_fixed_tendon_limit(self) -> torch.Tensor: - """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" - logger.warning( - "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `default_fixed_tendon_pos_limits` instead." - ) - return self.default_fixed_tendon_pos_limits diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py deleted file mode 100644 index ee6b6c04fae..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_old.py +++ /dev/null @@ -1,2143 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -# Flag for pyright to ignore type errors in this file. 
-# pyright: reportPrivateUsage=false - -from __future__ import annotations - -import logging -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch -import warp as wp -from prettytable import PrettyTable - -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils -from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator -from isaaclab.utils.types import ArticulationActions -from isaaclab.utils.version import get_isaac_sim_version -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .articulation_data import ArticulationData - -if TYPE_CHECKING: - from .articulation_cfg import ArticulationCfg - -# import logger -logger = logging.getLogger(__name__) - - -class Articulation(AssetBase): - """An articulation asset class. - - An articulation is a collection of rigid bodies connected by joints. The joints can be either - fixed or actuated. The joints can be of different types, such as revolute, prismatic, D-6, etc. - However, the articulation class has currently been tested with revolute and prismatic joints. - The class supports both floating-base and fixed-base articulations. The type of articulation - is determined based on the root joint of the articulation. If the root joint is fixed, then - the articulation is considered a fixed-base system. Otherwise, it is considered a floating-base - system. This can be checked using the :attr:`Articulation.is_fixed_base` attribute. - - For an asset to be considered an articulation, the root prim of the asset must have the - `USD ArticulationRootAPI`_. This API is used to define the sub-tree of the articulation using - the reduced coordinate formulation. On playing the simulation, the physics engine parses the - articulation root prim and creates the corresponding articulation in the physics engine. The - articulation root prim can be specified using the :attr:`AssetBaseCfg.prim_path` attribute. - - The articulation class also provides the functionality to augment the simulation of an articulated - system with custom actuator models. These models can either be explicit or implicit, as detailed in - the :mod:`isaaclab.actuators` module. The actuator models are specified using the - :attr:`ArticulationCfg.actuators` attribute. These are then parsed and used to initialize the - corresponding actuator models, when the simulation is played. - - During the simulation step, the articulation class first applies the actuator models to compute - the joint commands based on the user-specified targets. These joint commands are then applied - into the simulation. The joint commands can be either position, velocity, or effort commands. - As an example, the following snippet shows how this can be used for position commands: - - .. code-block:: python - - # an example instance of the articulation class - my_articulation = Articulation(cfg) - - # set joint position targets - my_articulation.set_joint_position_target(position) - # propagate the actuator models and apply the computed commands into the simulation - my_articulation.write_data_to_sim() - - # step the simulation using the simulation context - sim_context.step() - - # update the articulation state, where dt is the simulation time step - my_articulation.update(dt) - - .. 
_`USD ArticulationRootAPI`: https://openusd.org/dev/api/class_usd_physics_articulation_root_a_p_i.html - - """ - - cfg: ArticulationCfg - """Configuration instance for the articulations.""" - - actuators: dict[str, ActuatorBase] - """Dictionary of actuator instances for the articulation. - - The keys are the actuator names and the values are the actuator instances. The actuator instances - are initialized based on the actuator configurations specified in the :attr:`ArticulationCfg.actuators` - attribute. They are used to compute the joint commands during the :meth:`write_data_to_sim` function. - """ - - def __init__(self, cfg: ArticulationCfg): - """Initialize the articulation. - - Args: - cfg: A configuration instance. - """ - super().__init__(cfg) - - """ - Properties - """ - - @property - def data(self) -> ArticulationData: - return self._data - - @property - def num_instances(self) -> int: - return self.root_physx_view.count - - @property - def is_fixed_base(self) -> bool: - """Whether the articulation is a fixed-base or floating-base system.""" - return self.root_physx_view.shared_metatype.fixed_base - - @property - def num_joints(self) -> int: - """Number of joints in articulation.""" - return self.root_physx_view.shared_metatype.dof_count - - @property - def num_fixed_tendons(self) -> int: - """Number of fixed tendons in articulation.""" - return self.root_physx_view.max_fixed_tendons - - @property - def num_spatial_tendons(self) -> int: - """Number of spatial tendons in articulation.""" - return self.root_physx_view.max_spatial_tendons - - @property - def num_bodies(self) -> int: - """Number of bodies in articulation.""" - return self.root_physx_view.shared_metatype.link_count - - @property - def joint_names(self) -> list[str]: - """Ordered names of joints in articulation.""" - return self.root_physx_view.shared_metatype.dof_names - - @property - def fixed_tendon_names(self) -> list[str]: - """Ordered names of fixed tendons in articulation.""" - return self._fixed_tendon_names - - @property - def spatial_tendon_names(self) -> list[str]: - """Ordered names of spatial tendons in articulation.""" - return self._spatial_tendon_names - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies in articulation.""" - return self.root_physx_view.shared_metatype.link_names - - @property - def root_physx_view(self) -> physx.ArticulationView: - """Articulation view for the asset (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. - - Note: - Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are - applied to the simulation. - """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. 
This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - - Note: - Permanent wrenches are composed into the instantaneous wrench before the instantaneous wrenches are - applied to the simulation. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # use ellipses object to skip initial indices. - if env_ids is None: - env_ids = slice(None) - # reset actuators - for actuator in self.actuators.values(): - actuator.reset(env_ids) - # reset external wrenches. - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrenches and joint commands to the simulation. - - If any explicit actuators are present, then the actuator models are used to compute the - joint commands. Otherwise, the joint commands are directly set into the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_BODY_INDICES_WP, - env_ids=self._ALL_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - # apply actuator models - self._apply_actuator_model() - # write actions into simulation - self.root_physx_view.set_dof_actuation_forces(self._joint_effort_target_sim, self._ALL_INDICES) - # position and velocity targets only for implicit actuators - if self._has_implicit_actuators: - self.root_physx_view.set_dof_position_targets(self._joint_pos_target_sim, self._ALL_INDICES) - self.root_physx_view.set_dof_velocity_targets(self._joint_vel_target_sim, self._ALL_INDICES) - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. - """ - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. 
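The write path above composes the two composers in a fixed order: the permanent wrench is folded into the per-step (instantaneous) wrench, the sum is pushed to PhysX, and only the instantaneous part is cleared afterwards. A standalone sketch of that bookkeeping with made-up force values (torques behave identically):

.. code-block:: python

    import torch

    permanent_force = torch.tensor([[0.0, 0.0, 10.0]])  # e.g. a constant thrust
    instant_force = torch.tensor([[-0.5, 0.0, 0.0]])    # e.g. drag for this step only

    applied_force = instant_force + permanent_force  # what reaches the simulation this step
    instant_force = torch.zeros_like(instant_force)  # instantaneous wrench is reset

    # on the next step, with no new per-step wrench added,
    # only the permanent 10 N force is applied again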
- - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - def find_joints( - self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find joints in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the joint names. - joint_subset: A subset of joints to search for. Defaults to None, which means all joints - in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the joint indices and names. - """ - if joint_subset is None: - joint_subset = self.joint_names - # find joints - return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order) - - def find_fixed_tendons( - self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find fixed tendons in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the joint - names with fixed tendons. - tendon_subsets: A subset of joints with fixed tendons to search for. Defaults to None, which means - all joints in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the tendon indices and names. - """ - if tendon_subsets is None: - # tendons follow the joint names they are attached to - tendon_subsets = self.fixed_tendon_names - # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) - - def find_spatial_tendons( - self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False - ) -> tuple[list[int], list[str]]: - """Find spatial tendons in the articulation based on the name keys. - - Please see the :func:`isaaclab.utils.string.resolve_matching_names` function for more information - on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the tendon names. - tendon_subsets: A subset of tendons to search for. Defaults to None, which means all tendons - in the articulation are searched. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the tendon indices and names. - """ - if tendon_subsets is None: - tendon_subsets = self.spatial_tendon_names - # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) - - """ - Operations - State Writers. - """ - - def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. 
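The finders above delegate to ``isaaclab.utils.string.resolve_matching_names``, which matches regular expressions against the ordered joint/body names. A self-contained sketch using made-up quadruped-style names (on a live asset the equivalent call would be ``robot.find_joints(".*_hip_joint")``):

.. code-block:: python

    from isaaclab.utils.string import resolve_matching_names

    # made-up joint names, in the order the simulation view would report them
    joint_names = ["FL_hip_joint", "FL_knee_joint", "RL_hip_joint", "RL_knee_joint"]

    ids, names = resolve_matching_names(".*_hip_joint", joint_names)
    # expected: ids == [0, 2] and names == ["FL_hip_joint", "RL_hip_joint"]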
- - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) - - def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_link_pose_w[env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - - # convert root quaternion from wxyz to xyzw - root_poses_xyzw = self._data.root_link_pose_w.clone() - root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") - - # Need to invalidate the buffer to trigger the update with the new state. 
- self._data._body_link_pose_w.timestamp = -1.0
- self._data._body_com_pose_w.timestamp = -1.0
- self._data._body_state_w.timestamp = -1.0
- self._data._body_link_state_w.timestamp = -1.0
- self._data._body_com_state_w.timestamp = -1.0
-
- # set into simulation
- self.root_physx_view.set_root_transforms(root_poses_xyzw, indices=physx_env_ids)
-
- def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None):
- """Set the root center of mass pose over selected environment indices into the simulation.
-
- The root pose comprises the cartesian position and quaternion orientation in (w, x, y, z).
- The orientation is the orientation of the principal axes of inertia.
-
- Args:
- root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7).
- env_ids: Environment indices. If None, then all indices are used.
- """
- # resolve all indices
- if env_ids is None:
- local_env_ids = slice(env_ids)
- else:
- local_env_ids = env_ids
-
- # set into internal buffers
- self._data.root_com_pose_w[local_env_ids] = root_pose.clone()
- # update these buffers only if the user is using them. Otherwise this adds to overhead.
- if self._data._root_com_state_w.data is not None:
- self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids]
-
- # get CoM pose in link frame
- com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :]
- com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :]
- # transform input CoM pose to link frame
- root_link_pos, root_link_quat = math_utils.combine_frame_transforms(
- root_pose[..., :3],
- root_pose[..., 3:7],
- math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b),
- math_utils.quat_inv(com_quat_b),
- )
- root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1)
-
- # write transformed pose in link frame to sim
- self.write_root_link_pose_to_sim(root_pose=root_link_pose, env_ids=env_ids)
-
- def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
- """Set the root center of mass velocity over selected environment indices into the simulation.
-
- The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
- NOTE: This sets the velocity of the root's center of mass rather than the root's frame.
-
- Args:
- root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
- env_ids: Environment indices. If None, then all indices are used.
- """
- self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids)
-
- def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None):
- """Set the root center of mass velocity over selected environment indices into the simulation.
-
- The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order.
- NOTE: This sets the velocity of the root's center of mass rather than the root's frame.
-
- Args:
- root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6).
- env_ids: Environment indices. If None, then all indices are used.
- """
- # resolve all indices
- physx_env_ids = env_ids
- if env_ids is None:
- env_ids = slice(None)
- physx_env_ids = self._ALL_INDICES
-
- # note: we need to do this here since tensors are not set into simulation until step.
- # set into internal buffers - self._data.root_com_vel_w[env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - # make the acceleration zero to prevent reporting old values - self._data.body_acc_w[env_ids] = 0.0 - - # set into simulation - self.root_physx_view.set_root_velocities(self._data.root_com_vel_w, indices=physx_env_ids) - - def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's frame rather than the roots center of mass. - - Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] - - # get CoM pose in link frame - quat = self.data.root_link_quat_w[local_env_ids] - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - # transform input velocity to center of mass frame - root_com_velocity = root_velocity.clone() - root_com_velocity[:, :3] += torch.linalg.cross( - root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write transformed velocity in CoM frame to sim - self.write_root_com_velocity_to_sim(root_velocity=root_com_velocity, env_ids=env_ids) - - def write_joint_state_to_sim( - self, - position: torch.Tensor, - velocity: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint positions and velocities to the simulation. - - Args: - position: Joint positions. Shape is (len(env_ids), len(joint_ids)). - velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # set into simulation - self.write_joint_position_to_sim(position, joint_ids=joint_ids, env_ids=env_ids) - self.write_joint_velocity_to_sim(velocity, joint_ids=joint_ids, env_ids=env_ids) - - def write_joint_position_to_sim( - self, - position: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint positions to the simulation. - - Args: - position: Joint positions. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). 
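A hedged sketch of the usual reset pattern (editorial example, not part of the original file), assuming `robot` is an initialized `Articulation` and `env_ids` is a tensor of environment indices; the defaults come from the parsed configuration:

.. code-block:: python

    joint_pos = robot.data.default_joint_pos[env_ids].clone()
    joint_vel = robot.data.default_joint_vel[env_ids].clone()
    # writes the state directly into the physics engine (bypasses the actuator models)
    robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)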
- """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_pos[env_ids, joint_ids] = position - # Need to invalidate the buffer to trigger the update with the new root pose. - self._data._body_com_vel_w.timestamp = -1.0 - self._data._body_link_vel_w.timestamp = -1.0 - self._data._body_com_pose_b.timestamp = -1.0 - self._data._body_com_pose_w.timestamp = -1.0 - self._data._body_link_pose_w.timestamp = -1.0 - - self._data._body_state_w.timestamp = -1.0 - self._data._body_link_state_w.timestamp = -1.0 - self._data._body_com_state_w.timestamp = -1.0 - # set into simulation - self.root_physx_view.set_dof_positions(self._data.joint_pos, indices=physx_env_ids) - - def write_joint_velocity_to_sim( - self, - velocity: torch.Tensor, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | slice | None = None, - ): - """Write joint velocities to the simulation. - - Args: - velocity: Joint velocities. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_vel[env_ids, joint_ids] = velocity - self._data._previous_joint_vel[env_ids, joint_ids] = velocity - self._data.joint_acc[env_ids, joint_ids] = 0.0 - # set into simulation - self.root_physx_view.set_dof_velocities(self._data.joint_vel, indices=physx_env_ids) - - """ - Operations - Simulation Parameters Writers. - """ - - def write_joint_stiffness_to_sim( - self, - stiffness: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint stiffness into the simulation. - - Args: - stiffness: Joint stiffness. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the stiffness for. Defaults to None (all joints). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. (#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_stiffness[env_ids, joint_ids] = stiffness - # set into simulation - self.root_physx_view.set_dof_stiffnesses(self._data.joint_stiffness.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_damping_to_sim( - self, - damping: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint damping into the simulation. - - Args: - damping: Joint damping. 
Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the damping for. Defaults to None (all joints). - env_ids: The environment indices to set the damping for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. (#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_damping[env_ids, joint_ids] = damping - # set into simulation - self.root_physx_view.set_dof_dampings(self._data.joint_damping.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_position_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - warn_limit_violation: bool = True, - ): - """Write joint position limits into the simulation. - - Args: - limits: Joint limits. Shape is (len(env_ids), len(joint_ids), 2). - joint_ids: The joint indices to set the limits for. Defaults to None (all joints). - env_ids: The environment indices to set the limits for. Defaults to None (all environments). - warn_limit_violation: Whether to use warning or info level logging when default joint positions - exceed the new limits. Defaults to True. - """ - # note: This function isn't setting the values for actuator models. (#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_pos_limits[env_ids, joint_ids] = limits - # update default joint pos to stay within the new limits - if torch.any( - (self._data.default_joint_pos[env_ids, joint_ids] < limits[..., 0]) - | (self._data.default_joint_pos[env_ids, joint_ids] > limits[..., 1]) - ): - self._data.default_joint_pos[env_ids, joint_ids] = torch.clamp( - self._data.default_joint_pos[env_ids, joint_ids], limits[..., 0], limits[..., 1] - ) - violation_message = ( - "Some default joint positions are outside of the range of the new joint limits. Default joint positions" - " will be clamped to be within the new joint limits." - ) - if warn_limit_violation: - # warn level will show in console - logger.warning(violation_message) - else: - # info level is only written to log file - logger.info(violation_message) - # set into simulation - self.root_physx_view.set_dof_limits(self._data.joint_pos_limits.cpu(), indices=physx_env_ids.cpu()) - - # compute the soft limits based on the joint limits - # TODO: Optimize this computation for only selected joints - # soft joint position limits (recommended not to be too close to limits). 
- joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 - joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] - soft_limit_factor = self.cfg.soft_joint_pos_limit_factor - # add to data - self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor - self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor - - def write_joint_velocity_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint max velocity to the simulation. - - The velocity limit is used to constrain the joint velocities in the physics engine. The joint will only - be able to reach this velocity if the joint's effort limit is sufficiently large. If the joint is moving - faster than this velocity, the physics engine will actually try to brake the joint to reach this velocity. - - Args: - limits: Joint max velocity. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the max velocity for. Defaults to None (all joints). - env_ids: The environment indices to set the max velocity for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # move tensor to cpu if needed - if isinstance(limits, torch.Tensor): - limits = limits.to(self.device) - # set into internal buffers - self._data.joint_vel_limits[env_ids, joint_ids] = limits - # set into simulation - self.root_physx_view.set_dof_max_velocities(self._data.joint_vel_limits.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_effort_limit_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint effort limits into the simulation. - - The effort limit is used to constrain the computed joint efforts in the physics engine. If the - computed effort exceeds this limit, the physics engine will clip the effort to this value. - - Args: - limits: Joint torque limits. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). - env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). - """ - # note: This function isn't setting the values for actuator models. 
(#128) - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # move tensor to cpu if needed - if isinstance(limits, torch.Tensor): - limits = limits.to(self.device) - # set into internal buffers - self._data.joint_effort_limits[env_ids, joint_ids] = limits - # set into simulation - self.root_physx_view.set_dof_max_forces(self._data.joint_effort_limits.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_armature_to_sim( - self, - armature: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint armature into the simulation. - - The armature is directly added to the corresponding joint-space inertia. It helps improve the - simulation stability by reducing the joint velocities. - - Args: - armature: Joint armature. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). - env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_armature[env_ids, joint_ids] = armature - # set into simulation - self.root_physx_view.set_dof_armatures(self._data.joint_armature.cpu(), indices=physx_env_ids.cpu()) - - def write_joint_friction_coefficient_to_sim( - self, - joint_friction_coeff: torch.Tensor | float, - joint_dynamic_friction_coeff: torch.Tensor | float | None = None, - joint_viscous_friction_coeff: torch.Tensor | float | None = None, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - r"""Write joint friction coefficients into the simulation. - - For Isaac Sim versions below 5.0, only the static friction coefficient is set. - This limits the resisting force or torque up to a maximum proportional to the transmitted - spatial force: :math:`\|F_{resist}\| \leq \mu_s \, \|F_{spatial}\|`. - - For Isaac Sim versions 5.0 and above, the static, dynamic, and viscous friction coefficients - are set. The model combines Coulomb (static & dynamic) friction with a viscous term: - - - Static friction :math:`\mu_s` defines the maximum effort that prevents motion at rest. - - Dynamic friction :math:`\mu_d` applies once motion begins and remains constant during motion. - - Viscous friction :math:`c_v` is a velocity-proportional resistive term. - - Args: - joint_friction_coeff: Static friction coefficient :math:`\mu_s`. - Shape is (len(env_ids), len(joint_ids)). Scalars are broadcast to all selections. - joint_dynamic_friction_coeff: Dynamic (Coulomb) friction coefficient :math:`\mu_d`. - Same shape as above. If None, the dynamic coefficient is not updated. - joint_viscous_friction_coeff: Viscous friction coefficient :math:`c_v`. - Same shape as above. If None, the viscous coefficient is not updated. - joint_ids: The joint indices to set the friction coefficients for. Defaults to None (all joints). 
- env_ids: The environment indices to set the friction coefficients for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_friction_coeff[env_ids, joint_ids] = joint_friction_coeff - - # if dynamic or viscous friction coeffs are provided, set them too - if joint_dynamic_friction_coeff is not None: - self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff - if joint_viscous_friction_coeff is not None: - self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff - - # move the indices to cpu - physx_envs_ids_cpu = physx_env_ids.cpu() - - # set into simulation - if get_isaac_sim_version().major < 5: - self.root_physx_view.set_dof_friction_coefficients( - self._data.joint_friction_coeff.cpu(), indices=physx_envs_ids_cpu - ) - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_envs_ids_cpu, :, 0] = self._data.joint_friction_coeff[physx_envs_ids_cpu, :].cpu() - - # only set dynamic and viscous friction if provided - if joint_dynamic_friction_coeff is not None: - friction_props[physx_envs_ids_cpu, :, 1] = self._data.joint_dynamic_friction_coeff[ - physx_envs_ids_cpu, : - ].cpu() - - # only set viscous friction if provided - if joint_viscous_friction_coeff is not None: - friction_props[physx_envs_ids_cpu, :, 2] = self._data.joint_viscous_friction_coeff[ - physx_envs_ids_cpu, : - ].cpu() - - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_envs_ids_cpu) - - def write_joint_dynamic_friction_coefficient_to_sim( - self, - joint_dynamic_friction_coeff: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - if get_isaac_sim_version().major < 5: - logger.warning("Setting joint dynamic friction coefficients are not supported in Isaac Sim < 5.0") - return - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_dynamic_friction_coeff[env_ids, joint_ids] = joint_dynamic_friction_coeff - # set into simulation - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_env_ids.cpu(), :, 1] = self._data.joint_dynamic_friction_coeff[physx_env_ids, :].cpu() - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) - - def write_joint_viscous_friction_coefficient_to_sim( - self, - joint_viscous_friction_coeff: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - if get_isaac_sim_version().major < 5: - logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") - return - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if 
needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set into internal buffers - self._data.joint_viscous_friction_coeff[env_ids, joint_ids] = joint_viscous_friction_coeff - # set into simulation - friction_props = self.root_physx_view.get_dof_friction_properties() - friction_props[physx_env_ids.cpu(), :, 2] = self._data.joint_viscous_friction_coeff[physx_env_ids, :].cpu() - self.root_physx_view.set_dof_friction_properties(friction_props, indices=physx_env_ids.cpu()) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - body_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the asset's bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. Optionally, set the position to apply the - external wrench at (in the local link frame of the bodies). - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. - body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the articulations' bodies. - """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - if forces is None and torques is None: - logger.warning("No forces or torques provided. 
No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- body_ids - if body_ids is None: - body_ids = self._ALL_BODY_INDICES_WP - elif isinstance(body_ids, slice): - body_ids = wp.from_torch( - torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 - ) - elif not isinstance(body_ids, torch.Tensor): - body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) - else: - body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=body_ids, - env_ids=env_ids, - is_global=is_global, - ) - - def set_joint_position_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint position targets into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint position targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_pos_target[env_ids, joint_ids] = target - - def set_joint_velocity_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint velocity targets into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint velocity targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_vel_target[env_ids, joint_ids] = target - - def set_joint_effort_target( - self, target: torch.Tensor, joint_ids: Sequence[int] | slice | None = None, env_ids: Sequence[int] | None = None - ): - """Set joint efforts into internal buffers. - - This function does not apply the joint targets to the simulation. It only fills the buffers with - the desired values. 
To apply the joint targets, call the :meth:`write_data_to_sim` function. - - Args: - target: Joint effort targets. Shape is (len(env_ids), len(joint_ids)). - joint_ids: The joint indices to set the targets for. Defaults to None (all joints). - env_ids: The environment indices to set the targets for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if joint_ids is None: - joint_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and joint_ids != slice(None): - env_ids = env_ids[:, None] - # set targets - self._data.joint_effort_target[env_ids, joint_ids] = target - - """ - Operations - Tendons. - """ - - def set_fixed_tendon_stiffness( - self, - stiffness: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon stiffness into internal buffers. - - This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set stiffness - self._data.fixed_tendon_stiffness[env_ids, fixed_tendon_ids] = stiffness - - def set_fixed_tendon_damping( - self, - damping: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon damping into internal buffers. - - This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the damping for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set damping - self._data.fixed_tendon_damping[env_ids, fixed_tendon_ids] = damping - - def set_fixed_tendon_limit_stiffness( - self, - limit_stiffness: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon limit stiffness efforts into internal buffers. - - This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all fixed tendons). 
- env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit_stiffness - self._data.fixed_tendon_limit_stiffness[env_ids, fixed_tendon_ids] = limit_stiffness - - def set_fixed_tendon_position_limit( - self, - limit: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon limit efforts into internal buffers. - - This function does not apply the tendon limit to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the limit for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit - self._data.fixed_tendon_pos_limits[env_ids, fixed_tendon_ids] = limit - - def set_fixed_tendon_rest_length( - self, - rest_length: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon rest length efforts into internal buffers. - - This function does not apply the tendon rest length to the simulation. It only fills the buffers with - the desired values. To apply the tendon rest length, call the - :meth:`write_fixed_tendon_properties_to_sim` method. - - Args: - rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the rest length for. Defaults to None (all environments). - """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set rest_length - self._data.fixed_tendon_rest_length[env_ids, fixed_tendon_ids] = rest_length - - def set_fixed_tendon_offset( - self, - offset: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon offset efforts into internal buffers. - - This function does not apply the tendon offset to the simulation. It only fills the buffers with - the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function. - - Args: - offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)). - fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the offset for. Defaults to None (all environments). 
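For context, a hedged example of the staged set-then-write pattern that all of these fixed-tendon setters follow (an editorial sketch, assuming `robot` is an initialized `Articulation` with at least one fixed tendon; the gain values are arbitrary). The same pattern applies to the limit stiffness, rest length, and offset setters:

.. code-block:: python

    import torch

    stiffness = torch.full((robot.num_instances, robot.num_fixed_tendons), 50.0, device=robot.device)
    robot.set_fixed_tendon_stiffness(stiffness)
    robot.set_fixed_tendon_damping(0.1 * stiffness)
    # a single call pushes all staged fixed-tendon properties to the physics engine
    robot.write_fixed_tendon_properties_to_sim()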
- """ - # resolve indices - if env_ids is None: - env_ids = slice(None) - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - if env_ids != slice(None) and fixed_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set offset - self._data.fixed_tendon_offset[env_ids, fixed_tendon_ids] = offset - - def write_fixed_tendon_properties_to_sim( - self, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write fixed tendon properties into the simulation. - - Args: - fixed_tendon_ids: The fixed tendon indices to set the limits for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the limits for. Defaults to None (all environments). - """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - physx_env_ids = self._ALL_INDICES - if fixed_tendon_ids is None: - fixed_tendon_ids = slice(None) - - # set into simulation - self.root_physx_view.set_fixed_tendon_properties( - self._data.fixed_tendon_stiffness, - self._data.fixed_tendon_damping, - self._data.fixed_tendon_limit_stiffness, - self._data.fixed_tendon_pos_limits, - self._data.fixed_tendon_rest_length, - self._data.fixed_tendon_offset, - indices=physx_env_ids, - ) - - def set_spatial_tendon_stiffness( - self, - stiffness: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon stiffness into internal buffers. - - This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). - env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set stiffness - self._data.spatial_tendon_stiffness[env_ids, spatial_tendon_ids] = stiffness - - def set_spatial_tendon_damping( - self, - damping: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon damping into internal buffers. - - This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the damping for. Defaults to None, which means all environments. - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." 
- ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set damping - self._data.spatial_tendon_damping[env_ids, spatial_tendon_ids] = damping - - def set_spatial_tendon_limit_stiffness( - self, - limit_stiffness: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon limit stiffness into internal buffers. - - This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set limit stiffness - self._data.spatial_tendon_limit_stiffness[env_ids, spatial_tendon_ids] = limit_stiffness - - def set_spatial_tendon_offset( - self, - offset: torch.Tensor, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set spatial tendon offset efforts into internal buffers. - - This function does not apply the tendon offset to the simulation. It only fills the buffers with - the desired values. To apply the tendon offset, call the - :meth:`write_spatial_tendon_properties_to_sim` method. - - Args: - offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)). - spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). - env_ids: The environment indices to set the offset for. Defaults to None (all environments). - """ - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0. Please update to Isaac Sim 5.0 or later." - ) - return - # resolve indices - if env_ids is None: - env_ids = slice(None) - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - if env_ids != slice(None) and spatial_tendon_ids != slice(None): - env_ids = env_ids[:, None] - # set offset - self._data.spatial_tendon_offset[env_ids, spatial_tendon_ids] = offset - - def write_spatial_tendon_properties_to_sim( - self, - spatial_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write spatial tendon properties into the simulation. - - Args: - spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None, - which means all spatial tendons. - env_ids: The environment indices to set the properties for. Defaults to None, - which means all environments. 
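A similar hedged sketch for spatial tendons (editorial example, assuming Isaac Sim 5.0 or later and an articulation that actually has spatial tendons; on older versions the setters only log a warning and return):

.. code-block:: python

    import torch

    stiffness = torch.full((robot.num_instances, robot.num_spatial_tendons), 100.0, device=robot.device)
    robot.set_spatial_tendon_stiffness(stiffness)
    robot.set_spatial_tendon_damping(0.1 * stiffness)
    robot.write_spatial_tendon_properties_to_sim()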
- """ - # resolve indices - physx_env_ids = env_ids - if env_ids is None: - physx_env_ids = self._ALL_INDICES - if spatial_tendon_ids is None: - spatial_tendon_ids = slice(None) - - # set into simulation - self.root_physx_view.set_spatial_tendon_properties( - self._data.spatial_tendon_stiffness, - self._data.spatial_tendon_damping, - self._data.spatial_tendon_limit_stiffness, - self._data.spatial_tendon_offset, - indices=physx_env_ids, - ) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - - if self.cfg.articulation_root_prim_path is not None: - # The articulation root prim path is specified explicitly, so we can just use this. - root_prim_path_expr = self.cfg.prim_path + self.cfg.articulation_root_prim_path - else: - # No articulation root prim path was specified, so we need to search - # for it. We search for this in the first environment and then - # create a regex that matches all environments. - first_env_matching_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if first_env_matching_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") - first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString - - # Find all articulation root prims in the first environment. - first_env_root_prims = sim_utils.get_all_matching_child_prims( - first_env_matching_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(first_env_root_prims) == 0: - raise RuntimeError( - f"Failed to find an articulation when resolving '{first_env_matching_prim_path}'." - " Please ensure that the prim has 'USD ArticulationRootAPI' applied." - ) - if len(first_env_root_prims) > 1: - raise RuntimeError( - f"Failed to find a single articulation when resolving '{first_env_matching_prim_path}'." - f" Found multiple '{first_env_root_prims}' under '{first_env_matching_prim_path}'." - " Please ensure that there is only one articulation in the prim path tree." - ) - - # Now we convert the found articulation root from the first - # environment back into a regex that matches all environments. - first_env_root_prim_path = first_env_root_prims[0].GetPath().pathString - root_prim_path_relative_to_prim_path = first_env_root_prim_path[len(first_env_matching_prim_path) :] - root_prim_path_expr = self.cfg.prim_path + root_prim_path_relative_to_prim_path - - # -- articulation - self._root_physx_view = self._physics_sim_view.create_articulation_view(root_prim_path_expr.replace(".*", "*")) - - # check if the articulation was created - if self._root_physx_view._backend is None: - raise RuntimeError(f"Failed to create articulation at: {root_prim_path_expr}. 
Please check PhysX logs.") - - if get_isaac_sim_version().major < 5: - logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0: patching spatial-tendon getter" - " and setter to use dummy value" - ) - self._root_physx_view.max_spatial_tendons = 0 - self._root_physx_view.get_spatial_tendon_stiffnesses = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_dampings = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_limit_stiffnesses = lambda: torch.empty(0, device=self.device) - self._root_physx_view.get_spatial_tendon_offsets = lambda: torch.empty(0, device=self.device) - self._root_physx_view.set_spatial_tendon_properties = lambda *args, **kwargs: logger.warning( - "Spatial tendons are not supported in Isaac Sim < 5.0: Calling" - " set_spatial_tendon_properties has no effect" - ) - - # log information about the articulation - logger.info(f"Articulation initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") - logger.info(f"Is fixed root: {self.is_fixed_base}") - logger.info(f"Number of bodies: {self.num_bodies}") - logger.info(f"Body names: {self.body_names}") - logger.info(f"Number of joints: {self.num_joints}") - logger.info(f"Joint names: {self.joint_names}") - logger.info(f"Number of fixed tendons: {self.num_fixed_tendons}") - - # container for data access - self._data = ArticulationData(self.root_physx_view, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - self._process_actuators_cfg() - self._process_tendons() - # validate configuration - self._validate_cfg() - # update the robot data - self.update(0.0) - # log joint information - self._log_articulation_info() - - def _create_buffers(self): - # constants - self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_BODY_INDICES = torch.arange(self.num_bodies, dtype=torch.long, device=self.device) - self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_BODY_INDICES_WP = wp.from_torch(self._ALL_BODY_INDICES.to(torch.int32), dtype=wp.int32) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) - - # asset named data - self._data.joint_names = self.joint_names - self._data.body_names = self.body_names - # tendon names are set in _process_tendons function - - # -- joint properties - self._data.default_joint_pos_limits = self.root_physx_view.get_dof_limits().to(self.device).clone() - self._data.default_joint_stiffness = self.root_physx_view.get_dof_stiffnesses().to(self.device).clone() - self._data.default_joint_damping = self.root_physx_view.get_dof_dampings().to(self.device).clone() - self._data.default_joint_armature = self.root_physx_view.get_dof_armatures().to(self.device).clone() - if get_isaac_sim_version().major < 5: - self._data.default_joint_friction_coeff = ( - self.root_physx_view.get_dof_friction_coefficients().to(self.device).clone() - ) - self._data.default_joint_dynamic_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) - self._data.default_joint_viscous_friction_coeff = torch.zeros_like(self._data.default_joint_friction_coeff) - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - self._data.default_joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() - self._data.default_joint_dynamic_friction_coeff = 
friction_props[:, :, 1].to(self.device).clone() - self._data.default_joint_viscous_friction_coeff = friction_props[:, :, 2].to(self.device).clone() - - self._data.joint_pos_limits = self._data.default_joint_pos_limits.clone() - self._data.joint_vel_limits = self.root_physx_view.get_dof_max_velocities().to(self.device).clone() - self._data.joint_effort_limits = self.root_physx_view.get_dof_max_forces().to(self.device).clone() - self._data.joint_stiffness = self._data.default_joint_stiffness.clone() - self._data.joint_damping = self._data.default_joint_damping.clone() - self._data.joint_armature = self._data.default_joint_armature.clone() - self._data.joint_friction_coeff = self._data.default_joint_friction_coeff.clone() - self._data.joint_dynamic_friction_coeff = self._data.default_joint_dynamic_friction_coeff.clone() - self._data.joint_viscous_friction_coeff = self._data.default_joint_viscous_friction_coeff.clone() - - # -- body properties - self._data.default_mass = self.root_physx_view.get_masses().clone() - self._data.default_inertia = self.root_physx_view.get_inertias().clone() - - # -- joint commands (sent to the actuator from the user) - self._data.joint_pos_target = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.joint_vel_target = torch.zeros_like(self._data.joint_pos_target) - self._data.joint_effort_target = torch.zeros_like(self._data.joint_pos_target) - # -- joint commands (sent to the simulation after actuator processing) - self._joint_pos_target_sim = torch.zeros_like(self._data.joint_pos_target) - self._joint_vel_target_sim = torch.zeros_like(self._data.joint_pos_target) - self._joint_effort_target_sim = torch.zeros_like(self._data.joint_pos_target) - - # -- computed joint efforts from the actuator models - self._data.computed_torque = torch.zeros_like(self._data.joint_pos_target) - self._data.applied_torque = torch.zeros_like(self._data.joint_pos_target) - - # -- other data that are filled based on explicit actuator models - self._data.soft_joint_vel_limits = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.gear_ratio = torch.ones(self.num_instances, self.num_joints, device=self.device) - - # soft joint position limits (recommended not to be too close to limits). - joint_pos_mean = (self._data.joint_pos_limits[..., 0] + self._data.joint_pos_limits[..., 1]) / 2 - joint_pos_range = self._data.joint_pos_limits[..., 1] - self._data.joint_pos_limits[..., 0] - soft_limit_factor = self.cfg.soft_joint_pos_limit_factor - # add to data - self._data.soft_joint_pos_limits = torch.zeros(self.num_instances, self.num_joints, 2, device=self.device) - self._data.soft_joint_pos_limits[..., 0] = joint_pos_mean - 0.5 * joint_pos_range * soft_limit_factor - self._data.soft_joint_pos_limits[..., 1] = joint_pos_mean + 0.5 * joint_pos_range * soft_limit_factor - - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- root state - # note: we cast to tuple to avoid torch/numpy type mismatch. 
- default_root_state = ( - tuple(self.cfg.init_state.pos) - + tuple(self.cfg.init_state.rot) - + tuple(self.cfg.init_state.lin_vel) - + tuple(self.cfg.init_state.ang_vel) - ) - default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) - self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) - - # -- joint state - self._data.default_joint_pos = torch.zeros(self.num_instances, self.num_joints, device=self.device) - self._data.default_joint_vel = torch.zeros_like(self._data.default_joint_pos) - # joint pos - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_pos, self.joint_names - ) - self._data.default_joint_pos[:, indices_list] = torch.tensor(values_list, device=self.device) - # joint vel - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_vel, self.joint_names - ) - self._data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - self._root_physx_view = None - - """ - Internal helpers -- Actuators. - """ - - def _process_actuators_cfg(self): - """Process and apply articulation joint properties.""" - # create actuators - self.actuators = dict() - # flag for implicit actuators - # if this is false, we by-pass certain checks when doing actuator-related operations - self._has_implicit_actuators = False - - # iterate over all actuator configurations - for actuator_name, actuator_cfg in self.cfg.actuators.items(): - # type annotation for type checkers - actuator_cfg: ActuatorBaseCfg - # create actuator group - joint_ids, joint_names = self.find_joints(actuator_cfg.joint_names_expr) - # check if any joints are found - if len(joint_names) == 0: - raise ValueError( - f"No joints found for actuator group: {actuator_name} with joint name expression:" - f" {actuator_cfg.joint_names_expr}." - ) - # resolve joint indices - # we pass a slice if all joints are selected to avoid indexing overhead - if len(joint_names) == self.num_joints: - joint_ids = slice(None) - else: - joint_ids = torch.tensor(joint_ids, device=self.device) - # create actuator collection - # note: for efficiency avoid indexing when over all indices - actuator: ActuatorBase = actuator_cfg.class_type( - cfg=actuator_cfg, - joint_names=joint_names, - joint_ids=joint_ids, - num_envs=self.num_instances, - device=self.device, - stiffness=self._data.default_joint_stiffness[:, joint_ids], - damping=self._data.default_joint_damping[:, joint_ids], - armature=self._data.default_joint_armature[:, joint_ids], - friction=self._data.default_joint_friction_coeff[:, joint_ids], - dynamic_friction=self._data.default_joint_dynamic_friction_coeff[:, joint_ids], - viscous_friction=self._data.default_joint_viscous_friction_coeff[:, joint_ids], - effort_limit=self._data.joint_effort_limits[:, joint_ids].clone(), - velocity_limit=self._data.joint_vel_limits[:, joint_ids], - ) - # log information on actuator groups - model_type = "implicit" if actuator.is_implicit_model else "explicit" - logger.info( - f"Actuator collection: {actuator_name} with model '{actuator_cfg.class_type.__name__}'" - f" (type: {model_type}) and joint names: {joint_names} [{joint_ids}]." 
- ) - # store actuator group - self.actuators[actuator_name] = actuator - # set the passed gains and limits into the simulation - if isinstance(actuator, ImplicitActuator): - self._has_implicit_actuators = True - # the gains and limits are set into the simulation since actuator model is implicit - self.write_joint_stiffness_to_sim(actuator.stiffness, joint_ids=actuator.joint_indices) - self.write_joint_damping_to_sim(actuator.damping, joint_ids=actuator.joint_indices) - else: - # the gains and limits are processed by the actuator model - # we set gains to zero, and torque limit to a high value in simulation to avoid any interference - self.write_joint_stiffness_to_sim(0.0, joint_ids=actuator.joint_indices) - self.write_joint_damping_to_sim(0.0, joint_ids=actuator.joint_indices) - - # Set common properties into the simulation - self.write_joint_effort_limit_to_sim(actuator.effort_limit_sim, joint_ids=actuator.joint_indices) - self.write_joint_velocity_limit_to_sim(actuator.velocity_limit_sim, joint_ids=actuator.joint_indices) - self.write_joint_armature_to_sim(actuator.armature, joint_ids=actuator.joint_indices) - self.write_joint_friction_coefficient_to_sim(actuator.friction, joint_ids=actuator.joint_indices) - if get_isaac_sim_version().major >= 5: - self.write_joint_dynamic_friction_coefficient_to_sim( - actuator.dynamic_friction, joint_ids=actuator.joint_indices - ) - self.write_joint_viscous_friction_coefficient_to_sim( - actuator.viscous_friction, joint_ids=actuator.joint_indices - ) - - # Store the configured values from the actuator model - # note: this is the value configured in the actuator model (for implicit and explicit actuators) - self._data.default_joint_stiffness[:, actuator.joint_indices] = actuator.stiffness - self._data.default_joint_damping[:, actuator.joint_indices] = actuator.damping - self._data.default_joint_armature[:, actuator.joint_indices] = actuator.armature - self._data.default_joint_friction_coeff[:, actuator.joint_indices] = actuator.friction - if get_isaac_sim_version().major >= 5: - self._data.default_joint_dynamic_friction_coeff[:, actuator.joint_indices] = actuator.dynamic_friction - self._data.default_joint_viscous_friction_coeff[:, actuator.joint_indices] = actuator.viscous_friction - - # perform some sanity checks to ensure actuators are prepared correctly - total_act_joints = sum(actuator.num_joints for actuator in self.actuators.values()) - if total_act_joints != (self.num_joints - self.num_fixed_tendons): - logger.warning( - "Not all actuators are configured! Total number of actuated joints not equal to number of" - f" joints available: {total_act_joints} != {self.num_joints - self.num_fixed_tendons}." 
-            )
-
-        if self.cfg.actuator_value_resolution_debug_print:
-            t = PrettyTable(["Group", "Property", "Name", "ID", "USD Value", "ActuatorCfg Value", "Applied"])
-            for actuator_group, actuator in self.actuators.items():
-                group_count = 0
-                for property, resolution_details in actuator.joint_property_resolution_table.items():
-                    for prop_idx, resolution_detail in enumerate(resolution_details):
-                        actuator_group_str = actuator_group if group_count == 0 else ""
-                        property_str = property if prop_idx == 0 else ""
-                        fmt = [f"{v:.2e}" if isinstance(v, float) else str(v) for v in resolution_detail]
-                        t.add_row([actuator_group_str, property_str, *fmt])
-                    group_count += 1
-            logger.warning(f"\nActuatorCfg-USD Value Discrepancy Resolution (matching values are skipped): \n{t}")
-
-    def _process_tendons(self):
-        """Process fixed and spatial tendons."""
-        # create a list to store the fixed tendon names
-        self._fixed_tendon_names = list()
-        self._spatial_tendon_names = list()
-        # parse fixed tendons properties if they exist
-        if self.num_fixed_tendons > 0 or self.num_spatial_tendons > 0:
-            joint_paths = self.root_physx_view.dof_paths[0]
-
-            # iterate over all joints to find tendons attached to them
-            for j in range(self.num_joints):
-                usd_joint_path = joint_paths[j]
-                # check whether joint has tendons - tendon name follows the joint name it is attached to
-                joint = UsdPhysics.Joint.Get(self.stage, usd_joint_path)
-                if joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAxisRootAPI):
-                    joint_name = usd_joint_path.split("/")[-1]
-                    self._fixed_tendon_names.append(joint_name)
-                elif joint.GetPrim().HasAPI(PhysxSchema.PhysxTendonAttachmentRootAPI) or joint.GetPrim().HasAPI(
-                    PhysxSchema.PhysxTendonAttachmentLeafAPI
-                ):
-                    joint_name = usd_joint_path.split("/")[-1]
-                    self._spatial_tendon_names.append(joint_name)
-
-        # store the fixed tendon names
-        self.data.fixed_tendon_names = self._fixed_tendon_names
-        self.data.spatial_tendon_names = self._spatial_tendon_names
-
-    def _apply_actuator_model(self):
-        """Processes joint commands for the articulation by forwarding them to the actuators.
-
-        The actions are first processed using actuator models. Depending on the robot configuration,
-        the actuator models compute the joint-level simulation commands and set them into the PhysX buffers.
- """ - # process actions per group - for actuator in self.actuators.values(): - # prepare input for actuator model based on cached data - # TODO : A tensor dict would be nice to do the indexing of all tensors together - control_action = ArticulationActions( - joint_positions=self._data.joint_pos_target[:, actuator.joint_indices], - joint_velocities=self._data.joint_vel_target[:, actuator.joint_indices], - joint_efforts=self._data.joint_effort_target[:, actuator.joint_indices], - joint_indices=actuator.joint_indices, - ) - # compute joint command from the actuator model - control_action = actuator.compute( - control_action, - joint_pos=self._data.joint_pos[:, actuator.joint_indices], - joint_vel=self._data.joint_vel[:, actuator.joint_indices], - ) - # update targets (these are set into the simulation) - if control_action.joint_positions is not None: - self._joint_pos_target_sim[:, actuator.joint_indices] = control_action.joint_positions - if control_action.joint_velocities is not None: - self._joint_vel_target_sim[:, actuator.joint_indices] = control_action.joint_velocities - if control_action.joint_efforts is not None: - self._joint_effort_target_sim[:, actuator.joint_indices] = control_action.joint_efforts - # update state of the actuator model - # -- torques - self._data.computed_torque[:, actuator.joint_indices] = actuator.computed_effort - self._data.applied_torque[:, actuator.joint_indices] = actuator.applied_effort - # -- actuator data - self._data.soft_joint_vel_limits[:, actuator.joint_indices] = actuator.velocity_limit - # TODO: find a cleaner way to handle gear ratio. Only needed for variable gear ratio actuators. - if hasattr(actuator, "gear_ratio"): - self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio - - """ - Internal helpers -- Debugging. - """ - - def _validate_cfg(self): - """Validate the configuration after processing. - - Note: - This function should be called only after the configuration has been processed and the buffers have been - created. Otherwise, some settings that are altered during processing may not be validated. - For instance, the actuator models may change the joint max velocity limits. 
- """ - # check that the default values are within the limits - joint_pos_limits = self.root_physx_view.get_dof_limits()[0].to(self.device) - out_of_range = self._data.default_joint_pos[0] < joint_pos_limits[:, 0] - out_of_range |= self._data.default_joint_pos[0] > joint_pos_limits[:, 1] - violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) - # throw error if any of the default joint positions are out of the limits - if len(violated_indices) > 0: - # prepare message for violated joints - msg = "The following joints have default positions out of the limits: \n" - for idx in violated_indices: - joint_name = self.data.joint_names[idx] - joint_limit = joint_pos_limits[idx] - joint_pos = self.data.default_joint_pos[0, idx] - # add to message - msg += f"\t- '{joint_name}': {joint_pos:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" - raise ValueError(msg) - - # check that the default joint velocities are within the limits - joint_max_vel = self.root_physx_view.get_dof_max_velocities()[0].to(self.device) - out_of_range = torch.abs(self._data.default_joint_vel[0]) > joint_max_vel - violated_indices = torch.nonzero(out_of_range, as_tuple=False).squeeze(-1) - if len(violated_indices) > 0: - # prepare message for violated joints - msg = "The following joints have default velocities out of the limits: \n" - for idx in violated_indices: - joint_name = self.data.joint_names[idx] - joint_limit = [-joint_max_vel[idx], joint_max_vel[idx]] - joint_vel = self.data.default_joint_vel[0, idx] - # add to message - msg += f"\t- '{joint_name}': {joint_vel:.3f} not in [{joint_limit[0]:.3f}, {joint_limit[1]:.3f}]\n" - raise ValueError(msg) - - def _log_articulation_info(self): - """Log information about the articulation. - - Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. 
- """ - - # define custom formatters for large numbers and limit ranges - def format_large_number(_, v: float) -> str: - """Format large numbers using scientific notation.""" - if abs(v) >= 1e3: - return f"{v:.1e}" - else: - return f"{v:.3f}" - - def format_limits(_, v: tuple[float, float]) -> str: - """Format limit ranges using scientific notation.""" - if abs(v[0]) >= 1e3 or abs(v[1]) >= 1e3: - return f"[{v[0]:.1e}, {v[1]:.1e}]" - else: - return f"[{v[0]:.3f}, {v[1]:.3f}]" - - # read out all joint parameters from simulation - # -- gains - stiffnesses = self.root_physx_view.get_dof_stiffnesses()[0].tolist() - dampings = self.root_physx_view.get_dof_dampings()[0].tolist() - # -- properties - armatures = self.root_physx_view.get_dof_armatures()[0].tolist() - if get_isaac_sim_version().major < 5: - static_frictions = self.root_physx_view.get_dof_friction_coefficients()[0].tolist() - else: - friction_props = self.root_physx_view.get_dof_friction_properties() - static_frictions = friction_props[:, :, 0][0].tolist() - dynamic_frictions = friction_props[:, :, 1][0].tolist() - viscous_frictions = friction_props[:, :, 2][0].tolist() - # -- limits - position_limits = self.root_physx_view.get_dof_limits()[0].tolist() - velocity_limits = self.root_physx_view.get_dof_max_velocities()[0].tolist() - effort_limits = self.root_physx_view.get_dof_max_forces()[0].tolist() - # create table for term information - joint_table = PrettyTable() - joint_table.title = f"Simulation Joint Information (Prim path: {self.cfg.prim_path})" - # build field names based on Isaac Sim version - field_names = ["Index", "Name", "Stiffness", "Damping", "Armature"] - if get_isaac_sim_version().major < 5: - field_names.append("Static Friction") - else: - field_names.extend(["Static Friction", "Dynamic Friction", "Viscous Friction"]) - field_names.extend(["Position Limits", "Velocity Limits", "Effort Limits"]) - joint_table.field_names = field_names - - # apply custom formatters to numeric columns - joint_table.custom_format["Stiffness"] = format_large_number - joint_table.custom_format["Damping"] = format_large_number - joint_table.custom_format["Armature"] = format_large_number - joint_table.custom_format["Static Friction"] = format_large_number - if get_isaac_sim_version().major >= 5: - joint_table.custom_format["Dynamic Friction"] = format_large_number - joint_table.custom_format["Viscous Friction"] = format_large_number - joint_table.custom_format["Position Limits"] = format_limits - joint_table.custom_format["Velocity Limits"] = format_large_number - joint_table.custom_format["Effort Limits"] = format_large_number - - # set alignment of table columns - joint_table.align["Name"] = "l" - # add info on each term - for index, name in enumerate(self.joint_names): - # build row data based on Isaac Sim version - row_data = [index, name, stiffnesses[index], dampings[index], armatures[index]] - if get_isaac_sim_version().major < 5: - row_data.append(static_frictions[index]) - else: - row_data.extend([static_frictions[index], dynamic_frictions[index], viscous_frictions[index]]) - row_data.extend([position_limits[index], velocity_limits[index], effort_limits[index]]) - # add row to table - joint_table.add_row(row_data) - # convert table to string - logger.info(f"Simulation parameters for joints in {self.cfg.prim_path}:\n" + joint_table.get_string()) - - # read out all fixed tendon parameters from simulation - if self.num_fixed_tendons > 0: - # -- gains - ft_stiffnesses = self.root_physx_view.get_fixed_tendon_stiffnesses()[0].tolist() - 
ft_dampings = self.root_physx_view.get_fixed_tendon_dampings()[0].tolist() - # -- limits - ft_limit_stiffnesses = self.root_physx_view.get_fixed_tendon_limit_stiffnesses()[0].tolist() - ft_limits = self.root_physx_view.get_fixed_tendon_limits()[0].tolist() - ft_rest_lengths = self.root_physx_view.get_fixed_tendon_rest_lengths()[0].tolist() - ft_offsets = self.root_physx_view.get_fixed_tendon_offsets()[0].tolist() - # create table for term information - tendon_table = PrettyTable() - tendon_table.title = f"Simulation Fixed Tendon Information (Prim path: {self.cfg.prim_path})" - tendon_table.field_names = [ - "Index", - "Stiffness", - "Damping", - "Limit Stiffness", - "Limits", - "Rest Length", - "Offset", - ] - tendon_table.float_format = ".3" - - # apply custom formatters to tendon table columns - tendon_table.custom_format["Stiffness"] = format_large_number - tendon_table.custom_format["Damping"] = format_large_number - tendon_table.custom_format["Limit Stiffness"] = format_large_number - tendon_table.custom_format["Limits"] = format_limits - tendon_table.custom_format["Rest Length"] = format_large_number - tendon_table.custom_format["Offset"] = format_large_number - - # add info on each term - for index in range(self.num_fixed_tendons): - tendon_table.add_row( - [ - index, - ft_stiffnesses[index], - ft_dampings[index], - ft_limit_stiffnesses[index], - ft_limits[index], - ft_rest_lengths[index], - ft_offsets[index], - ] - ) - # convert table to string - logger.info( - f"Simulation parameters for fixed tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() - ) - - if self.num_spatial_tendons > 0: - # -- gains - st_stiffnesses = self.root_physx_view.get_spatial_tendon_stiffnesses()[0].tolist() - st_dampings = self.root_physx_view.get_spatial_tendon_dampings()[0].tolist() - # -- limits - st_limit_stiffnesses = self.root_physx_view.get_spatial_tendon_limit_stiffnesses()[0].tolist() - st_offsets = self.root_physx_view.get_spatial_tendon_offsets()[0].tolist() - # create table for term information - tendon_table = PrettyTable() - tendon_table.title = f"Simulation Spatial Tendon Information (Prim path: {self.cfg.prim_path})" - tendon_table.field_names = [ - "Index", - "Stiffness", - "Damping", - "Limit Stiffness", - "Offset", - ] - tendon_table.float_format = ".3" - # add info on each term - for index in range(self.num_spatial_tendons): - tendon_table.add_row( - [ - index, - st_stiffnesses[index], - st_dampings[index], - st_limit_stiffnesses[index], - st_offsets[index], - ] - ) - # convert table to string - logger.info( - f"Simulation parameters for spatial tendons in {self.cfg.prim_path}:\n" + tendon_table.get_string() - ) - - """ - Deprecated methods. - """ - - def write_joint_friction_to_sim( - self, - joint_friction: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Write joint friction coefficients into the simulation. - - .. deprecated:: 2.1.0 - Please use :meth:`write_joint_friction_coefficient_to_sim` instead. - """ - logger.warning( - "The function 'write_joint_friction_to_sim' will be deprecated in a future release. Please" - " use 'write_joint_friction_coefficient_to_sim' instead." 
- ) - self.write_joint_friction_coefficient_to_sim(joint_friction, joint_ids=joint_ids, env_ids=env_ids) - - def write_joint_limits_to_sim( - self, - limits: torch.Tensor | float, - joint_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - warn_limit_violation: bool = True, - ): - """Write joint limits into the simulation. - - .. deprecated:: 2.1.0 - Please use :meth:`write_joint_position_limit_to_sim` instead. - """ - logger.warning( - "The function 'write_joint_limits_to_sim' will be deprecated in a future release. Please" - " use 'write_joint_position_limit_to_sim' instead." - ) - self.write_joint_position_limit_to_sim( - limits, joint_ids=joint_ids, env_ids=env_ids, warn_limit_violation=warn_limit_violation - ) - - def set_fixed_tendon_limit( - self, - limit: torch.Tensor, - fixed_tendon_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - ): - """Set fixed tendon position limits into internal buffers. - - .. deprecated:: 2.1.0 - Please use :meth:`set_fixed_tendon_position_limit` instead. - """ - logger.warning( - "The function 'set_fixed_tendon_limit' will be deprecated in a future release. Please" - " use 'set_fixed_tendon_position_limit' instead." - ) - self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py deleted file mode 100644 index d1a94f1eac7..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data_old.py +++ /dev/null @@ -1,657 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -import weakref - -import torch - -import omni.physics.tensors.impl.api as physx - -import isaaclab.utils.math as math_utils -from isaaclab.sim.utils.stage import get_current_stage_id -from isaaclab.utils.buffers import TimestampedBuffer - - -class RigidObjectData: - """Data container for a rigid object. - - This class contains the data for a rigid object in the simulation. The data includes the state of - the root rigid body and the state of all the bodies in the object. The data is stored in the simulation - world frame unless otherwise specified. - - For a rigid body, there are two frames of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. - This needs to be taken into account when interpreting the data. - - The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful - when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer - is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. - """ - - def __init__(self, root_physx_view: physx.RigidBodyView, device: str): - """Initializes the rigid object data. - - Args: - root_physx_view: The root rigid body view. - device: The device used for processing. 
- """ - # Set the parameters - self.device = device - # Set the root rigid body view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. - self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_physx_view.count, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_physx_view.count, 1) - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._root_link_pose_w = TimestampedBuffer() - self._root_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._body_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._root_com_pose_w = TimestampedBuffer() - self._root_com_vel_w = TimestampedBuffer() - self._body_com_acc_w = TimestampedBuffer() - # -- combined state (these are cached as they concatenate) - self._root_state_w = TimestampedBuffer() - self._root_link_state_w = TimestampedBuffer() - self._root_com_state_w = TimestampedBuffer() - - def update(self, dt: float): - """Updates the data for the rigid object. - - Args: - dt: The time step for the update. This must be a positive value. - """ - # update the simulation timestamp - self._sim_timestamp += dt - - ## - # Names. - ## - - body_names: list[str] = None - """Body names in the order parsed by the simulation view.""" - - ## - # Defaults. - ## - - default_root_state: torch.Tensor = None - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). - - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. - """ - - default_mass: torch.Tensor = None - """Default mass read from the simulation. Shape is (num_instances, 1).""" - - default_inertia: torch.Tensor = None - """Default inertia tensor read from the simulation. Shape is (num_instances, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. - The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. - However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Root state properties. - ## - - @property - def root_link_pose_w(self) -> torch.Tensor: - """Root link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the actor frame of the root rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. 
- """ - if self._root_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._root_link_pose_w.data = pose - self._root_link_pose_w.timestamp = self._sim_timestamp - - return self._root_link_pose_w.data - - @property - def root_link_vel_w(self) -> torch.Tensor: - """Root link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the actor frame of the root - rigid body relative to the world. - """ - if self._root_link_vel_w.timestamp < self._sim_timestamp: - # read the CoM velocity - vel = self.root_com_vel_w.clone() - # adjust linear velocity to link from center of mass - vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 - ) - # set the buffer data and timestamp - self._root_link_vel_w.data = vel - self._root_link_vel_w.timestamp = self._sim_timestamp - - return self._root_link_vel_w.data - - @property - def root_com_pose_w(self) -> torch.Tensor: - """Root center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 7). - - This quantity is the pose of the center of mass frame of the root rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - if self._root_com_pose_w.timestamp < self._sim_timestamp: - # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( - self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] - ) - # set the buffer data and timestamp - self._root_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._root_com_pose_w.timestamp = self._sim_timestamp - - return self._root_com_pose_w.data - - @property - def root_com_vel_w(self) -> torch.Tensor: - """Root center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 6). - - This quantity contains the linear and angular velocities of the root rigid body's center of mass frame - relative to the world. - """ - if self._root_com_vel_w.timestamp < self._sim_timestamp: - self._root_com_vel_w.data = self._root_physx_view.get_velocities() - self._root_com_vel_w.timestamp = self._sim_timestamp - - return self._root_com_vel_w.data - - @property - def root_state_w(self) -> torch.Tensor: - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular - velocities are of the rigid body's center of mass frame. - """ - if self._root_state_w.timestamp < self._sim_timestamp: - self._root_state_w.data = torch.cat((self.root_link_pose_w, self.root_com_vel_w), dim=-1) - self._root_state_w.timestamp = self._sim_timestamp - - return self._root_state_w.data - - @property - def root_link_state_w(self) -> torch.Tensor: - """Root state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the - world. The orientation is provided in (w, x, y, z) format. 
- """ - if self._root_link_state_w.timestamp < self._sim_timestamp: - self._root_link_state_w.data = torch.cat((self.root_link_pose_w, self.root_link_vel_w), dim=-1) - self._root_link_state_w.timestamp = self._sim_timestamp - - return self._root_link_state_w.data - - @property - def root_com_state_w(self) -> torch.Tensor: - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame - relative to the world. Center of mass frame is the orientation principle axes of inertia. - """ - if self._root_com_state_w.timestamp < self._sim_timestamp: - self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) - self._root_com_state_w.timestamp = self._sim_timestamp - - return self._root_com_state_w.data - - ## - # Body state properties. - ## - - @property - def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). - - This quantity is the pose of the actor frame of the rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_link_pose_w.view(-1, 1, 7) - - @property - def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). - - This quantity contains the linear and angular velocities of the actor frame of the root - rigid body relative to the world. - """ - return self.root_link_vel_w.view(-1, 1, 6) - - @property - def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). - - This quantity is the pose of the center of mass frame of the rigid body relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_com_pose_w.view(-1, 1, 7) - - @property - def body_com_vel_w(self) -> torch.Tensor: - """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 6). - - This quantity contains the linear and angular velocities of the root rigid body's center of mass frame - relative to the world. - """ - return self.root_com_vel_w.view(-1, 1, 6) - - @property - def body_state_w(self) -> torch.Tensor: - """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, 1, 13). - - The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular - velocities are of the rigid bodies' center of mass frame. - """ - return self.root_state_w.view(-1, 1, 13) - - @property - def body_link_state_w(self) -> torch.Tensor: - """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 13). - - The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. - The orientation is provided in (w, x, y, z) format. - """ - return self.root_link_state_w.view(-1, 1, 13) - - @property - def body_com_state_w(self) -> torch.Tensor: - """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 13). - - The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the - world. 
Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the - principle inertia. The orientation is provided in (w, x, y, z) format. - """ - return self.root_com_state_w.view(-1, 1, 13) - - @property - def body_com_acc_w(self) -> torch.Tensor: - """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. - Shape is (num_instances, 1, 6). - - This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. - """ - if self._body_com_acc_w.timestamp < self._sim_timestamp: - self._body_com_acc_w.data = self._root_physx_view.get_accelerations().unsqueeze(1) - self._body_com_acc_w.timestamp = self._sim_timestamp - - return self._body_com_acc_w.data - - @property - def body_com_pose_b(self) -> torch.Tensor: - """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). - - This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._body_com_pose_b.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._root_physx_view.get_coms().to(self.device) - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") - # set the buffer data and timestamp - self._body_com_pose_b.data = pose.view(-1, 1, 7) - self._body_com_pose_b.timestamp = self._sim_timestamp - - return self._body_com_pose_b.data - - ## - # Derived Properties. - ## - - @property - def projected_gravity_b(self) -> torch.Tensor: - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self) -> torch.Tensor: - """Yaw heading of the base frame (in radians). Shape is (num_instances,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. - """ - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[:, 1], forward_w[:, 0]) - - @property - def root_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) - - @property - def root_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) - - @property - def root_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) - - @property - def root_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). 
- - This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the - rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def root_link_pos_w(self) -> torch.Tensor: - """Root link position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_pose_w[:, :3] - - @property - def root_link_quat_w(self) -> torch.Tensor: - """Root link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body. - """ - return self.root_link_pose_w[:, 3:7] - - @property - def root_link_lin_vel_w(self) -> torch.Tensor: - """Root linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's actor frame relative to the world. - """ - return self.root_link_vel_w[:, :3] - - @property - def root_link_ang_vel_w(self) -> torch.Tensor: - """Root link angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body relative to the world. - """ - return self.root_link_vel_w[:, 3:6] - - @property - def root_com_pos_w(self) -> torch.Tensor: - """Root center of mass position in simulation world frame. Shape is (num_instances, 3). - - This quantity is the position of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, :3] - - @property - def root_com_quat_w(self) -> torch.Tensor: - """Root center of mass orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, 4). - - This quantity is the orientation of the actor frame of the root rigid body relative to the world. - """ - return self.root_com_pose_w[:, 3:7] - - @property - def root_com_lin_vel_w(self) -> torch.Tensor: - """Root center of mass linear velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the linear velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, :3] - - @property - def root_com_ang_vel_w(self) -> torch.Tensor: - """Root center of mass angular velocity in simulation world frame. Shape is (num_instances, 3). - - This quantity is the angular velocity of the root rigid body's center of mass frame relative to the world. - """ - return self.root_com_vel_w[:, 3:6] - - @property - def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the position of the rigid bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., :3] - - @property - def body_link_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). - - This quantity is the orientation of the rigid bodies' actor frame relative to the world. - """ - return self.body_link_pose_w[..., 3:7] - - @property - def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. 
- """ - return self.body_link_vel_w[..., :3] - - @property - def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. - """ - return self.body_link_vel_w[..., 3:6] - - @property - def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the position of the rigid bodies' actor frame. - """ - return self.body_com_pose_w[..., :3] - - @property - def body_com_quat_w(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all bodies in simulation world frame. - - Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. - """ - return self.body_com_pose_w[..., 3:7] - - @property - def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame. - """ - return self.body_com_vel_w[..., :3] - - @property - def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame. - """ - return self.body_com_vel_w[..., 3:6] - - @property - def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the linear acceleration of the rigid bodies' center of mass frame. - """ - return self.body_com_acc_w[..., :3] - - @property - def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). - - This quantity is the angular acceleration of the rigid bodies' center of mass frame. - """ - return self.body_com_acc_w[..., 3:6] - - @property - def body_com_pos_b(self) -> torch.Tensor: - """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, 1, 3). - - This quantity is the center of mass location relative to its body'slink frame. - """ - return self.body_com_pose_b[..., :3] - - @property - def body_com_quat_b(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, 1, 4). - - This quantity is the orientation of the principles axes of inertia relative to its body's link frame. - """ - return self.body_com_pose_b[..., 3:7] - - ## - # Properties for backwards compatibility. 
- ## - - @property - def root_pose_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pose_w`.""" - return self.root_link_pose_w - - @property - def root_pos_w(self) -> torch.Tensor: - """Same as :attr:`root_link_pos_w`.""" - return self.root_link_pos_w - - @property - def root_quat_w(self) -> torch.Tensor: - """Same as :attr:`root_link_quat_w`.""" - return self.root_link_quat_w - - @property - def root_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_vel_w`.""" - return self.root_com_vel_w - - @property - def root_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_w`.""" - return self.root_com_lin_vel_w - - @property - def root_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_w`.""" - return self.root_com_ang_vel_w - - @property - def root_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_lin_vel_b`.""" - return self.root_com_lin_vel_b - - @property - def root_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`root_com_ang_vel_b`.""" - return self.root_com_ang_vel_b - - @property - def body_pose_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pose_w`.""" - return self.body_link_pose_w - - @property - def body_pos_w(self) -> torch.Tensor: - """Same as :attr:`body_link_pos_w`.""" - return self.body_link_pos_w - - @property - def body_quat_w(self) -> torch.Tensor: - """Same as :attr:`body_link_quat_w`.""" - return self.body_link_quat_w - - @property - def body_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_vel_w`.""" - return self.body_com_vel_w - - @property - def body_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_vel_w`.""" - return self.body_com_lin_vel_w - - @property - def body_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_vel_w`.""" - return self.body_com_ang_vel_w - - @property - def body_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_acc_w`.""" - return self.body_com_acc_w - - @property - def body_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_lin_acc_w`.""" - return self.body_com_lin_acc_w - - @property - def body_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`body_com_ang_acc_w`.""" - return self.body_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`body_com_pos_b`.""" - return self.body_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`body_com_quat_b`.""" - return self.body_com_quat_b diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py deleted file mode 100644 index 6d0ec98f431..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_old.py +++ /dev/null @@ -1,582 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch -import warp as wp - -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .rigid_object_data import RigidObjectData - -if TYPE_CHECKING: - from .rigid_object_cfg import RigidObjectCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RigidObject(AssetBase): - """A rigid object asset class. - - Rigid objects are assets comprising of rigid bodies. They can be used to represent dynamic objects - such as boxes, spheres, etc. A rigid body is described by its pose, velocity and mass distribution. - - For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ - applied to it. This API is used to define the simulation properties of the rigid body. On playing the - simulation, the physics engine will automatically register the rigid body and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. - - .. note:: - - For users familiar with Isaac Sim, the PhysX view class API is not the exactly same as Isaac Sim view - class API. Similar to Isaac Lab, Isaac Sim wraps around the PhysX view API. However, as of now (2023.1 release), - we see a large difference in initializing the view classes in Isaac Sim. This is because the view classes - in Isaac Sim perform additional USD-related operations which are slow and also not required. - - .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html - """ - - cfg: RigidObjectCfg - """Configuration instance for the rigid object.""" - - def __init__(self, cfg: RigidObjectCfg): - """Initialize the rigid object. - - Args: - cfg: A configuration instance. - """ - super().__init__(cfg) - - """ - Properties - """ - - @property - def data(self) -> RigidObjectData: - return self._data - - @property - def num_instances(self) -> int: - return self.root_physx_view.count - - @property - def num_bodies(self) -> int: - """Number of bodies in the asset. - - This is always 1 since each object is a single rigid body. - """ - return 1 - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies in the rigid object.""" - prim_paths = self.root_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def root_physx_view(self) -> physx.RigidBodyView: - """Rigid body view for the asset (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. 
- """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # resolve all indices - if env_ids is None: - env_ids = slice(None) - # reset external wrench - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrench to the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_BODY_INDICES_WP, - env_ids=self._ALL_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._instantaneous_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._instantaneous_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self._permanent_wrench_composer.composed_force_as_torch.view(-1, 3), - torque_data=self._permanent_wrench_composer.composed_torque_as_torch.view(-1, 3), - position_data=None, - indices=self._ALL_INDICES, - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. - """ - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the rigid body based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. - """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - """ - Operations - Write to simulation. - """ - - def write_root_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. 
- - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_com_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_com_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_link_state_to_sim(self, root_state: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link state over selected environment indices into the simulation. - - The root state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - root_state: Root state in simulation frame. Shape is (len(env_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_state[:, :7], env_ids=env_ids) - self.write_root_link_velocity_to_sim(root_state[:, 7:], env_ids=env_ids) - - def write_root_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_link_pose_to_sim(root_pose, env_ids=env_ids) - - def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - root_pose: Root link poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_link_pose_w[env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. 
- if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, :7] = self._data.root_link_pose_w[env_ids] - if self._data._root_com_state_w.data is not None: - expected_com_pos, expected_com_quat = math_utils.combine_frame_transforms( - self._data.root_link_pose_w[env_ids, :3], - self._data.root_link_pose_w[env_ids, 3:7], - self.data.body_com_pos_b[env_ids, 0, :], - self.data.body_com_quat_b[env_ids, 0, :], - ) - self._data.root_com_state_w[env_ids, :3] = expected_com_pos - self._data.root_com_state_w[env_ids, 3:7] = expected_com_quat - # convert root quaternion from wxyz to xyzw - root_poses_xyzw = self._data.root_link_pose_w.clone() - root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") - # set into simulation - self.root_physx_view.set_transforms(root_poses_xyzw, indices=physx_env_ids) - - def write_root_com_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass pose over selected environment indices into the simulation. - - The root pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - The orientation is the orientation of the principle axes of inertia. - - Args: - root_pose: Root center of mass poses in simulation frame. Shape is (len(env_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_com_pose_w[local_env_ids] = root_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[local_env_ids, :7] = self._data.root_com_pose_w[local_env_ids] - - # get CoM pose in link frame - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - com_quat_b = self.data.body_com_quat_b[local_env_ids, 0, :] - # transform input CoM pose to link frame - root_link_pos, root_link_quat = math_utils.combine_frame_transforms( - root_pose[..., :3], - root_pose[..., 3:7], - math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), - math_utils.quat_inv(com_quat_b), - ) - root_link_pose = torch.cat((root_link_pos, root_link_quat), dim=-1) - - # write transformed pose in link frame to sim - self.write_root_link_pose_to_sim(root_link_pose, env_ids=env_ids) - - def write_root_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - self.write_root_com_velocity_to_sim(root_velocity=root_velocity, env_ids=env_ids) - - def write_root_com_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root center of mass velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. 
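# --- Editor's note: illustrative sketch, not part of the original patch. ---
# `write_root_com_pose_to_sim` above recovers the link pose from a commanded CoM
# pose by composing it with the inverse of the CoM offset stored in the link frame:
#
#   T_world<-link = T_world<-com * (T_link<-com)^-1
#
# With the same helpers the file already uses (isaaclab.utils.math), and com_pos_b,
# com_quat_b denoting the CoM pose in the link frame, the inverse offset is
#   inv_pos = quat_apply(quat_inv(com_quat_b), -com_pos_b)
#   inv_quat = quat_inv(com_quat_b)
# and the link pose follows from
#   link_pos, link_quat = combine_frame_transforms(com_pos_w, com_quat_w, inv_pos, inv_quat)
# which is then written to the simulation via `write_root_link_pose_to_sim`.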
- NOTE: This sets the velocity of the root's center of mass rather than the roots frame. - - Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - physx_env_ids = env_ids - if env_ids is None: - env_ids = slice(None) - physx_env_ids = self._ALL_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.root_com_vel_w[env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_com_state_w.data is not None: - self._data.root_com_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_state_w.data is not None: - self._data.root_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[env_ids, 7:] = self._data.root_com_vel_w[env_ids] - # make the acceleration zero to prevent reporting old values - self._data.body_com_acc_w[env_ids] = 0.0 - # set into simulation - self.root_physx_view.set_velocities(self._data.root_com_vel_w, indices=physx_env_ids) - - def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: Sequence[int] | None = None): - """Set the root link velocity over selected environment indices into the simulation. - - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the root's frame rather than the roots center of mass. - - Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - """ - # resolve all indices - if env_ids is None: - local_env_ids = slice(env_ids) - else: - local_env_ids = env_ids - - # set into internal buffers - self._data.root_link_vel_w[local_env_ids] = root_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._root_link_state_w.data is not None: - self._data.root_link_state_w[local_env_ids, 7:] = self._data.root_link_vel_w[local_env_ids] - - # get CoM pose in link frame - quat = self.data.root_link_quat_w[local_env_ids] - com_pos_b = self.data.body_com_pos_b[local_env_ids, 0, :] - # transform input velocity to center of mass frame - root_com_velocity = root_velocity.clone() - root_com_velocity[:, :3] += torch.linalg.cross( - root_com_velocity[:, 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write transformed velocity in CoM frame to sim - self.write_root_com_velocity_to_sim(root_com_velocity, env_ids=env_ids) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - body_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the asset's bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. 
Optionally, set the position to apply the - external wrench at (in the local link frame of the bodies). - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 3), torques=torch.zeros(0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - Defaults to None. - body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the bodies. - """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - if forces is None and torques is None: - logger.warning("No forces or torques provided. No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- body_ids - body_ids = self._ALL_BODY_INDICES_WP - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=body_ids, - env_ids=env_ids, - is_global=is_global, - ) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # obtain the first prim in the regex expression (all others are assumed to be a copy of this) - template_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if template_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") - template_prim_path = template_prim.GetPath().pathString - - # find rigid root prims - root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), - traverse_instance_prims=False, - ) - if len(root_prims) == 0: - raise RuntimeError( - f"Failed to find a rigid body when resolving '{self.cfg.prim_path}'." - " Please ensure that the prim has 'USD RigidBodyAPI' applied." - ) - if len(root_prims) > 1: - raise RuntimeError( - f"Failed to find a single rigid body when resolving '{self.cfg.prim_path}'." - f" Found multiple '{root_prims}' under '{template_prim_path}'." 
- " Please ensure that there is only one rigid body in the prim path tree." - ) - - articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(articulation_prims) != 0: - if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): - raise RuntimeError( - f"Found an articulation root when resolving '{self.cfg.prim_path}' for rigid objects. These are" - f" located at: '{articulation_prims}' under '{template_prim_path}'. Please disable the articulation" - " root in the USD or from code by setting the parameter" - " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." - ) - - # resolve root prim back into regex expression - root_prim_path = root_prims[0].GetPath().pathString - root_prim_path_expr = self.cfg.prim_path + root_prim_path[len(template_prim_path) :] - # -- object view - self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_expr.replace(".*", "*")) - - # check if the rigid body was created - if self._root_physx_view._backend is None: - raise RuntimeError(f"Failed to create rigid body at: {self.cfg.prim_path}. Please check PhysX logs.") - - # log information about the rigid body - logger.info(f"Rigid body initialized at: {self.cfg.prim_path} with root '{root_prim_path_expr}'.") - logger.info(f"Number of instances: {self.num_instances}") - logger.info(f"Number of bodies: {self.num_bodies}") - logger.info(f"Body names: {self.body_names}") - - # container for data access - self._data = RigidObjectData(self.root_physx_view, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - # update the rigid body data - self.update(0.0) - - def _create_buffers(self): - """Create buffers for storing data.""" - # constants - self._ALL_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_INDICES_WP = wp.from_torch(self._ALL_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_BODY_INDICES_WP = wp.from_torch( - torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 - ) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) - - # set information about rigid body into data - self._data.body_names = self.body_names - self._data.default_mass = self.root_physx_view.get_masses().clone() - self._data.default_inertia = self.root_physx_view.get_inertias().clone() - - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- root state - # note: we cast to tuple to avoid torch/numpy type mismatch. - default_root_state = ( - tuple(self.cfg.init_state.pos) - + tuple(self.cfg.init_state.rot) - + tuple(self.cfg.init_state.lin_vel) - + tuple(self.cfg.init_state.ang_vel) - ) - default_root_state = torch.tensor(default_root_state, dtype=torch.float, device=self.device) - self._data.default_root_state = default_root_state.repeat(self.num_instances, 1) - - """ - Internal simulation callbacks. 
- """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._root_physx_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py deleted file mode 100644 index 5156ef729e4..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data_old.py +++ /dev/null @@ -1,515 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -import weakref - -import torch - -import omni.physics.tensors.impl.api as physx - -import isaaclab.utils.math as math_utils -from isaaclab.sim.utils.stage import get_current_stage_id -from isaaclab.utils.buffers import TimestampedBuffer - - -class RigidObjectCollectionData: - """Data container for a rigid object collection. - - This class contains the data for a rigid object collection in the simulation. The data includes the state of - all the bodies in the collection. The data is stored in the simulation world frame unless otherwise specified. - The data is in the order ``(num_instances, num_objects, data_size)``, where data_size is the size of the data. - - For a rigid body, there are two frames of reference that are used: - - - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim - with the rigid body schema. - - Center of mass frame: The frame of reference of the center of mass of the rigid body. - - Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. - This needs to be taken into account when interpreting the data. - - The data is lazily updated, meaning that the data is only updated when it is accessed. This is useful - when the data is expensive to compute or retrieve. The data is updated when the timestamp of the buffer - is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. - """ - - def __init__(self, root_physx_view: physx.RigidBodyView, num_objects: int, device: str): - """Initializes the data. - - Args: - root_physx_view: The root rigid body view. - num_objects: The number of objects in the collection. - device: The device used for processing. - """ - # Set the parameters - self.device = device - self.num_objects = num_objects - # Set the root rigid body view - # note: this is stored as a weak reference to avoid circular references between the asset class - # and the data container. This is important to avoid memory leaks. 
- self._root_physx_view: physx.RigidBodyView = weakref.proxy(root_physx_view) - self.num_instances = self._root_physx_view.count // self.num_objects - - # Set initial time stamp - self._sim_timestamp = 0.0 - - # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() - # Convert to direction vector - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - - # Initialize constants - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, self.num_objects, 1) - self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat( - self.num_instances, self.num_objects, 1 - ) - - # Initialize the lazy buffers. - # -- link frame w.r.t. world frame - self._object_link_pose_w = TimestampedBuffer() - self._object_link_vel_w = TimestampedBuffer() - # -- com frame w.r.t. link frame - self._object_com_pose_b = TimestampedBuffer() - # -- com frame w.r.t. world frame - self._object_com_pose_w = TimestampedBuffer() - self._object_com_vel_w = TimestampedBuffer() - self._object_com_acc_w = TimestampedBuffer() - # -- combined state(these are cached as they concatenate) - self._object_state_w = TimestampedBuffer() - self._object_link_state_w = TimestampedBuffer() - self._object_com_state_w = TimestampedBuffer() - - def update(self, dt: float): - """Updates the data for the rigid object collection. - - Args: - dt: The time step for the update. This must be a positive value. - """ - # update the simulation timestamp - self._sim_timestamp += dt - - ## - # Names. - ## - - object_names: list[str] = None - """Object names in the order parsed by the simulation view.""" - - ## - # Defaults. - ## - - default_object_state: torch.Tensor = None - """Default object state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - Shape is (num_instances, num_objects, 13). - - The position and quaternion are of each object's rigid body's actor frame. Meanwhile, the linear and - angular velocities are of the center of mass frame. - """ - - default_mass: torch.Tensor = None - """Default object mass read from the simulation. Shape is (num_instances, num_objects, 1).""" - - default_inertia: torch.Tensor = None - """Default object inertia tensor read from the simulation. Shape is (num_instances, num_objects, 9). - - The inertia tensor should be given with respect to the center of mass, expressed in the rigid body's actor frame. - The values are stored in the order :math:`[I_{xx}, I_{yx}, I_{zx}, I_{xy}, I_{yy}, I_{zy}, I_{xz}, I_{yz}, I_{zz}]`. - However, due to the symmetry of inertia tensors, row- and column-major orders are equivalent. - - This quantity is parsed from the USD schema at the time of initialization. - """ - - ## - # Root state properties. - ## - - @property - def object_link_pose_w(self): - """Object link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's actor frame. 
- """ - if self._object_link_pose_w.timestamp < self._sim_timestamp: - # read data from simulation - pose = self._reshape_view_to_data(self._root_physx_view.get_transforms().clone()) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") - # set the buffer data and timestamp - self._object_link_pose_w.data = pose - self._object_link_pose_w.timestamp = self._sim_timestamp - - return self._object_link_pose_w.data - - @property - def object_link_vel_w(self): - """Object link velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 6). - - The linear and angular velocities are of the rigid body's actor frame. - """ - if self._object_link_vel_w.timestamp < self._sim_timestamp: - # read data from simulation - velocity = self.object_com_vel_w.clone() - # adjust linear velocity to link from center of mass - velocity[..., :3] += torch.linalg.cross( - velocity[..., 3:], math_utils.quat_apply(self.object_link_quat_w, -self.object_com_pos_b), dim=-1 - ) - # set the buffer data and timestamp - self._object_link_vel_w.data = velocity - self._object_link_vel_w.timestamp = self._sim_timestamp - - return self._object_link_vel_w.data - - @property - def object_com_pose_w(self): - """Object center of mass pose ``[pos, quat]`` in simulation world frame. - Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's center of mass frame. - """ - if self._object_com_pose_w.timestamp < self._sim_timestamp: - # adjust pose to center of mass - pos, quat = math_utils.combine_frame_transforms( - self.object_link_pos_w, self.object_link_quat_w, self.object_com_pos_b, self.object_com_quat_b - ) - # set the buffer data and timestamp - self._object_com_pose_w.data = torch.cat((pos, quat), dim=-1) - self._object_com_pose_w.timestamp = self._sim_timestamp - - return self._object_com_pose_w.data - - @property - def object_com_vel_w(self): - """Object center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 6). - - The linear and angular velocities are of the rigid body's center of mass frame. - """ - if self._object_com_vel_w.timestamp < self._sim_timestamp: - self._object_com_vel_w.data = self._reshape_view_to_data(self._root_physx_view.get_velocities()) - self._object_com_vel_w.timestamp = self._sim_timestamp - - return self._object_com_vel_w.data - - @property - def object_state_w(self): - """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position and orientation are of the rigid body's actor frame. Meanwhile, the linear and angular - velocities are of the rigid body's center of mass frame. - """ - if self._object_state_w.timestamp < self._sim_timestamp: - self._object_state_w.data = torch.cat((self.object_link_pose_w, self.object_com_vel_w), dim=-1) - self._object_state_w.timestamp = self._sim_timestamp - - return self._object_state_w.data - - @property - def object_link_state_w(self): - """Object center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body root frame relative to the - world. 
- """ - if self._object_link_state_w.timestamp < self._sim_timestamp: - self._object_link_state_w.data = torch.cat((self.object_link_pose_w, self.object_link_vel_w), dim=-1) - self._object_link_state_w.timestamp = self._sim_timestamp - - return self._object_link_state_w.data - - @property - def object_com_state_w(self): - """Object state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_objects, 13). - - The position, quaternion, and linear/angular velocity are of the rigid body's center of mass frame - relative to the world. Center of mass frame has the orientation along the principle axes of inertia. - """ - if self._object_com_state_w.timestamp < self._sim_timestamp: - self._object_com_state_w.data = torch.cat((self.object_com_pose_w, self.object_com_vel_w), dim=-1) - self._object_com_state_w.timestamp = self._sim_timestamp - - return self._object_com_state_w.data - - @property - def object_com_acc_w(self): - """Acceleration of all objects. Shape is (num_instances, num_objects, 6). - - This quantity is the acceleration of the rigid bodies' center of mass frame. - """ - if self._object_com_acc_w.timestamp < self._sim_timestamp: - self._object_com_acc_w.data = self._reshape_view_to_data(self._root_physx_view.get_accelerations()) - self._object_com_acc_w.timestamp = self._sim_timestamp - return self._object_com_acc_w.data - - @property - def object_com_pose_b(self): - """Object center of mass pose ``[pos, quat]`` in their respective body's link frame. - Shape is (num_instances, num_objects, 7). - - The position and orientation are of the rigid body's center of mass frame. - The orientation is provided in (w, x, y, z) format. - """ - if self._object_com_pose_b.timestamp < self._sim_timestamp: - # obtain the coms - poses = self._root_physx_view.get_coms().to(self.device) - poses[:, 3:7] = math_utils.convert_quat(poses[:, 3:7], to="wxyz") - # read data from simulation - self._object_com_pose_b.data = self._reshape_view_to_data(poses) - self._object_com_pose_b.timestamp = self._sim_timestamp - - return self._object_com_pose_b.data - - ## - # Derived properties. - ## - - @property - def projected_gravity_b(self): - """Projection of the gravity direction on base frame. Shape is (num_instances, num_objects, 3).""" - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.GRAVITY_VEC_W) - - @property - def heading_w(self): - """Yaw heading of the base frame (in radians). Shape is (num_instances, num_objects,). - - Note: - This quantity is computed by assuming that the forward-direction of the base - frame is along x-direction, i.e. :math:`(1, 0, 0)`. - """ - forward_w = math_utils.quat_apply(self.object_link_quat_w, self.FORWARD_VEC_B) - return torch.atan2(forward_w[..., 1], forward_w[..., 0]) - - @property - def object_link_lin_vel_b(self) -> torch.Tensor: - """Object link linear velocity in base frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the actor frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_lin_vel_w) - - @property - def object_link_ang_vel_b(self) -> torch.Tensor: - """Object link angular velocity in base world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the actor frame of the root rigid body frame with - respect to the rigid body's actor frame. 
- """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_link_ang_vel_w) - - @property - def object_com_lin_vel_b(self) -> torch.Tensor: - """Object center of mass linear velocity in base frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the center of mass frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_lin_vel_w) - - @property - def object_com_ang_vel_b(self) -> torch.Tensor: - """Object center of mass angular velocity in base world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the center of mass frame of the root rigid body frame with - respect to the rigid body's actor frame. - """ - return math_utils.quat_apply_inverse(self.object_link_quat_w, self.object_com_ang_vel_w) - - ## - # Sliced properties. - ## - - @property - def object_link_pos_w(self) -> torch.Tensor: - """Object link position in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the position of the actor frame of the rigid bodies. - """ - return self.object_link_pose_w[..., :3] - - @property - def object_link_quat_w(self) -> torch.Tensor: - """Object link orientation (w, x, y, z) in simulation world frame. Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the actor frame of the rigid bodies. - """ - return self.object_link_pose_w[..., 3:7] - - @property - def object_link_lin_vel_w(self) -> torch.Tensor: - """Object link linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the rigid bodies' actor frame. - """ - return self.object_link_vel_w[..., :3] - - @property - def object_link_ang_vel_w(self) -> torch.Tensor: - """Object link angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the rigid bodies' actor frame. - """ - return self.object_link_vel_w[..., 3:6] - - @property - def object_com_pos_w(self) -> torch.Tensor: - """Object center of mass position in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the position of the center of mass frame of the rigid bodies. - """ - return self.object_com_pose_w[..., :3] - - @property - def object_com_quat_w(self) -> torch.Tensor: - """Object center of mass orientation (w, x, y, z) in simulation world frame. - Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the center of mass frame of the rigid bodies. - """ - return self.object_com_pose_w[..., 3:7] - - @property - def object_com_lin_vel_w(self) -> torch.Tensor: - """Object center of mass linear velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the linear velocity of the rigid bodies' center of mass frame. - """ - return self.object_com_vel_w[..., :3] - - @property - def object_com_ang_vel_w(self) -> torch.Tensor: - """Object center of mass angular velocity in simulation world frame. Shape is (num_instances, num_objects, 3). - - This quantity is the angular velocity of the rigid bodies' center of mass frame. - """ - return self.object_com_vel_w[..., 3:6] - - @property - def object_com_lin_acc_w(self) -> torch.Tensor: - """Object center of mass linear acceleration in simulation world frame. - Shape is (num_instances, num_objects, 3). 
- - This quantity is the linear acceleration of the rigid bodies' center of mass frame. - """ - return self.object_com_acc_w[..., :3] - - @property - def object_com_ang_acc_w(self) -> torch.Tensor: - """Object center of mass angular acceleration in simulation world frame. - Shape is (num_instances, num_objects, 3). - - This quantity is the angular acceleration of the rigid bodies' center of mass frame. - """ - return self.object_com_acc_w[..., 3:6] - - @property - def object_com_pos_b(self) -> torch.Tensor: - """Center of mass of all of the bodies in their respective body's link frame. - Shape is (num_instances, num_objects, 3). - - This quantity is the center of mass location relative to its body link frame. - """ - return self.object_com_pose_b[..., :3] - - @property - def object_com_quat_b(self) -> torch.Tensor: - """Orientation (w, x, y, z) of the principal axes of inertia of all of the bodies in their respective body's link frame. - Shape is (num_instances, num_objects, 4). - - This quantity is the orientation of the principal axes of inertia relative to its body link frame. - The orientation is provided in (w, x, y, z) format. - """ - return self.object_com_pose_b[..., 3:7] - - ## - # Properties for backwards compatibility. - ## - - @property - def object_pose_w(self) -> torch.Tensor: - """Same as :attr:`object_link_pose_w`.""" - return self.object_link_pose_w - - @property - def object_pos_w(self) -> torch.Tensor: - """Same as :attr:`object_link_pos_w`.""" - return self.object_link_pos_w - - @property - def object_quat_w(self) -> torch.Tensor: - """Same as :attr:`object_link_quat_w`.""" - return self.object_link_quat_w - - @property - def object_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_vel_w`.""" - return self.object_com_vel_w - - @property - def object_lin_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_vel_w`.""" - return self.object_com_lin_vel_w - - @property - def object_ang_vel_w(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_vel_w`.""" - return self.object_com_ang_vel_w - - @property - def object_lin_vel_b(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_vel_b`.""" - return self.object_com_lin_vel_b - - @property - def object_ang_vel_b(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_vel_b`.""" - return self.object_com_ang_vel_b - - @property - def object_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_acc_w`.""" - return self.object_com_acc_w - - @property - def object_lin_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_lin_acc_w`.""" - return self.object_com_lin_acc_w - - @property - def object_ang_acc_w(self) -> torch.Tensor: - """Same as :attr:`object_com_ang_acc_w`.""" - return self.object_com_ang_acc_w - - @property - def com_pos_b(self) -> torch.Tensor: - """Same as :attr:`object_com_pos_b`.""" - return self.object_com_pos_b - - @property - def com_quat_b(self) -> torch.Tensor: - """Same as :attr:`object_com_quat_b`.""" - return self.object_com_quat_b - - ## - # Helpers. - ## - - def _reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data from the physics view to (num_instances, num_objects, data_size). - - Args: - data: The data from the physics view. Shape is (num_instances * num_objects, data_size). - - Returns: - The reshaped data. Shape is (num_instances, num_objects, data_size).
- """ - return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py deleted file mode 100644 index b458b15b403..00000000000 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_old.py +++ /dev/null @@ -1,783 +0,0 @@ -# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import logging -import re -from collections.abc import Sequence -from typing import TYPE_CHECKING - -import torch -import warp as wp - -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer - -from ..asset_base import AssetBase -from .rigid_object_collection_data import RigidObjectCollectionData - -if TYPE_CHECKING: - from .rigid_object_collection_cfg import RigidObjectCollectionCfg - -# import logger -logger = logging.getLogger(__name__) - - -class RigidObjectCollection(AssetBase): - """A rigid object collection class. - - This class represents a collection of rigid objects in the simulation, where the state of the - rigid objects can be accessed and modified using a batched ``(env_ids, object_ids)`` API. - - For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ - applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the - simulation, the physics engine will automatically register the rigid bodies and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. - - Rigid objects in the collection are uniquely identified via the key of the dictionary - :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the - :class:`~isaaclab.assets.RigidObjectCollectionCfg` configuration class. - This differs from the :class:`~isaaclab.assets.RigidObject` class, where a rigid object is identified by - the name of the Xform where the `USD RigidBodyAPI`_ is applied. This would not be possible for the rigid - object collection since the :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` dictionary - could contain the same rigid object multiple times, leading to ambiguity. - - .. _`USD RigidBodyAPI`: https://openusd.org/dev/api/class_usd_physics_rigid_body_a_p_i.html - """ - - cfg: RigidObjectCollectionCfg - """Configuration instance for the rigid object collection.""" - - def __init__(self, cfg: RigidObjectCollectionCfg): - """Initialize the rigid object collection. - - Args: - cfg: A configuration instance. - """ - # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. 
- # check that the config is valid - cfg.validate() - # store inputs - self.cfg = cfg.copy() - # flag for whether the asset is initialized - self._is_initialized = False - self._prim_paths = [] - # spawn the rigid objects - for rigid_object_cfg in self.cfg.rigid_objects.values(): - # check if the rigid object path is valid - # note: currently the spawner does not work if there is a regex pattern in the leaf - # For example, if the prim path is "/World/Object_[1,2]" since the spawner will not - # know which prim to spawn. This is a limitation of the spawner and not the asset. - asset_path = rigid_object_cfg.prim_path.split("/")[-1] - asset_path_is_regex = re.match(r"^[a-zA-Z0-9/_]+$", asset_path) is None - # spawn the asset - if rigid_object_cfg.spawn is not None and not asset_path_is_regex: - rigid_object_cfg.spawn.func( - rigid_object_cfg.prim_path, - rigid_object_cfg.spawn, - translation=rigid_object_cfg.init_state.pos, - orientation=rigid_object_cfg.init_state.rot, - ) - # check that spawn was successful - matching_prims = sim_utils.find_matching_prims(rigid_object_cfg.prim_path) - if len(matching_prims) == 0: - raise RuntimeError(f"Could not find prim with path {rigid_object_cfg.prim_path}.") - self._prim_paths.append(rigid_object_cfg.prim_path) - # stores object names - self._object_names_list = [] - - # register various callback functions - self._register_callbacks() - self._debug_vis_handle = None - - """ - Properties - """ - - @property - def data(self) -> RigidObjectCollectionData: - return self._data - - @property - def num_instances(self) -> int: - """Number of instances of the collection.""" - return self.root_physx_view.count // self.num_objects - - @property - def num_objects(self) -> int: - """Number of objects in the collection. - - This corresponds to the distinct number of rigid bodies in the collection. - """ - return len(self.object_names) - - @property - def object_names(self) -> list[str]: - """Ordered names of objects in the rigid object collection.""" - return self._object_names_list - - @property - def root_physx_view(self) -> physx.RigidBodyView: - """Rigid body view for the rigid body collection (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._root_physx_view # type: ignore - - @property - def instantaneous_wrench_composer(self) -> WrenchComposer: - """Instantaneous wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are only valid for the current simulation step. At the end of the simulation step, the wrenches set - to this object are discarded. This is useful to apply forces that change all the time, things like drag forces - for instance. - """ - return self._instantaneous_wrench_composer - - @property - def permanent_wrench_composer(self) -> WrenchComposer: - """Permanent wrench composer. - - Returns a :class:`~isaaclab.utils.wrench_composer.WrenchComposer` instance. Wrenches added or set to this wrench - composer are persistent and are applied to the simulation at every step. This is useful to apply forces that - are constant over a period of time, things like the thrust of a motor for instance. - """ - return self._permanent_wrench_composer - - """ - Operations. - """ - - def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.Tensor | None = None): - """Resets all internal buffers of selected environments and objects. 
- - Args: - env_ids: The indices of the object to reset. Defaults to None (all instances). - object_ids: The indices of the object to reset. Defaults to None (all objects). - """ - # resolve all indices - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - # reset external wrench - self._instantaneous_wrench_composer.reset(env_ids) - self._permanent_wrench_composer.reset(env_ids) - - def write_data_to_sim(self): - """Write external wrench to the simulation. - - Note: - We write external wrench to the simulation here since this function is called before the simulation step. - This ensures that the external wrench is applied at every simulation step. - """ - # write external wrench - if self._instantaneous_wrench_composer.active or self._permanent_wrench_composer.active: - if self._instantaneous_wrench_composer.active: - # Compose instantaneous wrench with permanent wrench - self._instantaneous_wrench_composer.add_forces_and_torques( - forces=self._permanent_wrench_composer.composed_force, - torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_OBJ_INDICES_WP, - env_ids=self._ALL_ENV_INDICES_WP, - ) - # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_force_as_torch), - torque_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_torque_as_torch), - position_data=None, - indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), - is_global=False, - ) - else: - # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( - force_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_force_as_torch), - torque_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_torque_as_torch), - position_data=None, - indices=self._env_obj_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), - is_global=False, - ) - self._instantaneous_wrench_composer.reset() - - def update(self, dt: float): - self._data.update(dt) - - """ - Operations - Finders. - """ - - def find_objects( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[torch.Tensor, list[str]]: - """Find objects in the collection based on the name keys. - - Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more - information on the name matching. - - Args: - name_keys: A regular expression or a list of regular expressions to match the object names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple containing the object indices and names. - """ - obj_ids, obj_names = string_utils.resolve_matching_names(name_keys, self.object_names, preserve_order) - return torch.tensor(obj_ids, device=self.device), obj_names - - """ - Operations - Write to simulation. - """ - - def write_object_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object state over selected environment and object indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. 
- - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_com_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass state over selected environment indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_com_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_com_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_link_state_to_sim( - self, - object_state: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object link state over selected environment indices into the simulation. - - The object state comprises of the cartesian position, quaternion orientation in (w, x, y, z), and linear - and angular velocity. All the quantities are in the simulation frame. - - Args: - object_state: Object state in simulation frame. Shape is (len(env_ids), len(object_ids), 13). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_state[..., :7], env_ids=env_ids, object_ids=object_ids) - self.write_object_link_velocity_to_sim(object_state[..., 7:], env_ids=env_ids, object_ids=object_ids) - - def write_object_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object pose over selected environment and object indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_link_pose_to_sim(object_pose, env_ids=env_ids, object_ids=object_ids) - - def write_object_link_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object pose over selected environment and object indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. 
- """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.object_link_pose_w[env_ids[:, None], object_ids] = object_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - if self._data._object_state_w.data is not None: - self._data.object_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - if self._data._object_com_state_w.data is not None: - # get CoM pose in link frame - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] - com_pos, com_quat = math_utils.combine_frame_transforms( - object_pose[..., :3], - object_pose[..., 3:7], - com_pos_b, - com_quat_b, - ) - self._data.object_com_state_w[env_ids[:, None], object_ids, :3] = com_pos - self._data.object_com_state_w[env_ids[:, None], object_ids, 3:7] = com_quat - - # convert the quaternion from wxyz to xyzw - poses_xyzw = self._data.object_link_pose_w.clone() - poses_xyzw[..., 3:] = math_utils.convert_quat(poses_xyzw[..., 3:], to="xyzw") - - # set into simulation - view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) - self.root_physx_view.set_transforms(self.reshape_data_to_view(poses_xyzw), indices=view_ids) - - def write_object_com_pose_to_sim( - self, - object_pose: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass pose over selected environment indices into the simulation. - - The object pose comprises of the cartesian position and quaternion orientation in (w, x, y, z). - The orientation is the orientation of the principle axes of inertia. - - Args: - object_pose: Object poses in simulation frame. Shape is (len(env_ids), len(object_ids), 7). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # set into internal buffers - self._data.object_com_pose_w[env_ids[:, None], object_ids] = object_pose.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. 
- if self._data._object_com_state_w.data is not None: - self._data.object_com_state_w[env_ids[:, None], object_ids, :7] = object_pose.clone() - - # get CoM pose in link frame - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - com_quat_b = self.data.object_com_quat_b[env_ids[:, None], object_ids] - # transform input CoM pose to link frame - object_link_pos, object_link_quat = math_utils.combine_frame_transforms( - object_pose[..., :3], - object_pose[..., 3:7], - math_utils.quat_apply(math_utils.quat_inv(com_quat_b), -com_pos_b), - math_utils.quat_inv(com_quat_b), - ) - - # write transformed pose in link frame to sim - object_link_pose = torch.cat((object_link_pos, object_link_quat), dim=-1) - self.write_object_link_pose_to_sim(object_link_pose, env_ids=env_ids, object_ids=object_ids) - - def write_object_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object velocity over selected environment and object indices into the simulation. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - self.write_object_com_velocity_to_sim(object_velocity, env_ids=env_ids, object_ids=object_ids) - - def write_object_com_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object center of mass velocity over selected environment and object indices into the simulation. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # note: we need to do this here since tensors are not set into simulation until step. - # set into internal buffers - self._data.object_com_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_com_state_w.data is not None: - self._data.object_com_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - if self._data._object_state_w.data is not None: - self._data.object_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - # make the acceleration zero to prevent reporting old values - self._data.object_com_acc_w[env_ids[:, None], object_ids] = 0.0 - - # set into simulation - view_ids = self._env_obj_ids_to_view_ids(env_ids, object_ids) - self.root_physx_view.set_velocities(self.reshape_data_to_view(self._data.object_com_vel_w), indices=view_ids) - - def write_object_link_velocity_to_sim( - self, - object_velocity: torch.Tensor, - env_ids: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - ): - """Set the object link velocity over selected environment indices into the simulation. 
- - The velocity comprises linear velocity (x, y, z) and angular velocity (x, y, z) in that order. - NOTE: This sets the velocity of the object's frame rather than the objects center of mass. - - Args: - object_velocity: Object velocities in simulation frame. Shape is (len(env_ids), len(object_ids), 6). - env_ids: Environment indices. If None, then all indices are used. - object_ids: Object indices. If None, then all indices are used. - """ - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES - - # set into internal buffers - self._data.object_link_vel_w[env_ids[:, None], object_ids] = object_velocity.clone() - # update these buffers only if the user is using them. Otherwise this adds to overhead. - if self._data._object_link_state_w.data is not None: - self._data.object_link_state_w[env_ids[:, None], object_ids, 7:] = object_velocity.clone() - - # get CoM pose in link frame - quat = self.data.object_link_quat_w[env_ids[:, None], object_ids] - com_pos_b = self.data.object_com_pos_b[env_ids[:, None], object_ids] - # transform input velocity to center of mass frame - object_com_velocity = object_velocity.clone() - object_com_velocity[..., :3] += torch.linalg.cross( - object_com_velocity[..., 3:], math_utils.quat_apply(quat, com_pos_b), dim=-1 - ) - - # write center of mass velocity to sim - self.write_object_com_velocity_to_sim(object_com_velocity, env_ids=env_ids, object_ids=object_ids) - - """ - Operations - Setters. - """ - - def set_external_force_and_torque( - self, - forces: torch.Tensor, - torques: torch.Tensor, - positions: torch.Tensor | None = None, - object_ids: slice | torch.Tensor | None = None, - env_ids: torch.Tensor | None = None, - is_global: bool = False, - ): - """Set external force and torque to apply on the objects' bodies in their local frame. - - For many applications, we want to keep the applied external force on rigid bodies constant over a period of - time (for instance, during the policy control). This function allows us to store the external force and torque - into buffers which are then applied to the simulation at every step. - - .. caution:: - If the function is called with empty forces and torques, then this function disables the application - of external wrench to the simulation. - - .. code-block:: python - - # example of disabling external wrench - asset.set_external_force_and_torque(forces=torch.zeros(0, 0, 3), torques=torch.zeros(0, 0, 3)) - - .. note:: - This function does not apply the external wrench to the simulation. It only fills the buffers with - the desired values. To apply the external wrench, call the :meth:`write_data_to_sim` function - right before the simulation step. - - Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(object_ids), 3). - object_ids: Object indices to apply external wrench to. Defaults to None (all objects). - env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). - is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, - the external wrench is applied in the link frame of the bodies. 
- """ - logger.warning( - "The function 'set_external_force_and_torque' will be deprecated in a future release. Please" - " use 'permanent_wrench_composer.set_forces_and_torques' instead." - ) - - if forces is None and torques is None: - logger.warning("No forces or torques provided. No permanent external wrench will be applied.") - - # resolve all indices - # -- env_ids - if env_ids is None: - env_ids = self._ALL_ENV_INDICES_WP - elif not isinstance(env_ids, torch.Tensor): - env_ids = wp.array(env_ids, dtype=wp.int32, device=self.device) - else: - env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) - # -- object_ids - if object_ids is None: - object_ids = self._ALL_OBJ_INDICES_WP - elif isinstance(object_ids, slice): - object_ids = wp.from_torch( - torch.arange(self.num_objects, dtype=torch.int32, device=self.device)[object_ids], dtype=wp.int32 - ) - elif not isinstance(object_ids, torch.Tensor): - object_ids = wp.array(object_ids, dtype=wp.int32, device=self.device) - else: - object_ids = wp.from_torch(object_ids.to(torch.int32), dtype=wp.int32) - - # Write to wrench composer - self._permanent_wrench_composer.set_forces_and_torques( - forces=wp.from_torch(forces, dtype=wp.vec3f) if forces is not None else None, - torques=wp.from_torch(torques, dtype=wp.vec3f) if torques is not None else None, - positions=wp.from_torch(positions, dtype=wp.vec3f) if positions is not None else None, - body_ids=object_ids, - env_ids=env_ids, - is_global=is_global, - ) - - """ - Helper functions. - """ - - def reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data coming from the :attr:`root_physx_view` to - (num_instances, num_objects, data_dim). - - Args: - data: The data coming from the :attr:`root_physx_view`. Shape is (num_instances * num_objects, data_dim). - - Returns: - The reshaped data. Shape is (num_instances, num_objects, data_dim). - """ - return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) - - def reshape_data_to_view(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data to the be consistent with data from the :attr:`root_physx_view`. - - Args: - data: The data to be reshaped. Shape is (num_instances, num_objects, data_dim). - - Returns: - The reshaped data. Shape is (num_instances * num_objects, data_dim). - """ - return torch.einsum("ijk -> jik", data).reshape(self.num_objects * self.num_instances, *data.shape[2:]) - - """ - Internal helper. - """ - - def _initialize_impl(self): - # clear object names list to prevent double counting on re-initialization - self._object_names_list.clear() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - root_prim_path_exprs = [] - for name, rigid_object_cfg in self.cfg.rigid_objects.items(): - # obtain the first prim in the regex expression (all others are assumed to be a copy of this) - template_prim = sim_utils.find_first_matching_prim(rigid_object_cfg.prim_path) - if template_prim is None: - raise RuntimeError(f"Failed to find prim for expression: '{rigid_object_cfg.prim_path}'.") - template_prim_path = template_prim.GetPath().pathString - - # find rigid root prims - root_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.RigidBodyAPI), - traverse_instance_prims=False, - ) - if len(root_prims) == 0: - raise RuntimeError( - f"Failed to find a rigid body when resolving '{rigid_object_cfg.prim_path}'." 
- " Please ensure that the prim has 'USD RigidBodyAPI' applied." - ) - if len(root_prims) > 1: - raise RuntimeError( - f"Failed to find a single rigid body when resolving '{rigid_object_cfg.prim_path}'." - f" Found multiple '{root_prims}' under '{template_prim_path}'." - " Please ensure that there is only one rigid body in the prim path tree." - ) - - # check that no rigid object has an articulation root API, which decreases simulation performance - articulation_prims = sim_utils.get_all_matching_child_prims( - template_prim_path, - predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), - traverse_instance_prims=False, - ) - if len(articulation_prims) != 0: - if articulation_prims[0].GetAttribute("physxArticulation:articulationEnabled").Get(): - raise RuntimeError( - f"Found an articulation root when resolving '{rigid_object_cfg.prim_path}' in the rigid object" - f" collection. These are located at: '{articulation_prims}' under '{template_prim_path}'." - " Please disable the articulation root in the USD or from code by setting the parameter" - " 'ArticulationRootPropertiesCfg.articulation_enabled' to False in the spawn configuration." - ) - - # resolve root prim back into regex expression - root_prim_path = root_prims[0].GetPath().pathString - root_prim_path_expr = rigid_object_cfg.prim_path + root_prim_path[len(template_prim_path) :] - root_prim_path_exprs.append(root_prim_path_expr.replace(".*", "*")) - - self._object_names_list.append(name) - - # -- object view - self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) - - # check if the rigid body was created - if self._root_physx_view._backend is None: - raise RuntimeError("Failed to create rigid body collection. Please check PhysX logs.") - - # log information about the rigid body - logger.info(f"Number of instances: {self.num_instances}") - logger.info(f"Number of distinct objects: {self.num_objects}") - logger.info(f"Object names: {self.object_names}") - - # container for data access - self._data = RigidObjectCollectionData(self.root_physx_view, self.num_objects, self.device) - - # create buffers - self._create_buffers() - # process configuration - self._process_cfg() - # update the rigid body data - self.update(0.0) - - def _create_buffers(self): - """Create buffers for storing data.""" - # constants - self._ALL_ENV_INDICES = torch.arange(self.num_instances, dtype=torch.long, device=self.device) - self._ALL_OBJ_INDICES = torch.arange(self.num_objects, dtype=torch.long, device=self.device) - self._ALL_ENV_INDICES_WP = wp.from_torch(self._ALL_ENV_INDICES.to(torch.int32), dtype=wp.int32) - self._ALL_OBJ_INDICES_WP = wp.from_torch(self._ALL_OBJ_INDICES.to(torch.int32), dtype=wp.int32) - - # external wrench composer - self._instantaneous_wrench_composer = WrenchComposer(self) - self._permanent_wrench_composer = WrenchComposer(self) - - # set information about rigid body into data - self._data.object_names = self.object_names - self._data.default_mass = self.reshape_view_to_data(self.root_physx_view.get_masses().clone()) - self._data.default_inertia = self.reshape_view_to_data(self.root_physx_view.get_inertias().clone()) - - def _process_cfg(self): - """Post processing of configuration parameters.""" - # default state - # -- object state - default_object_states = [] - for rigid_object_cfg in self.cfg.rigid_objects.values(): - default_object_state = ( - tuple(rigid_object_cfg.init_state.pos) - + tuple(rigid_object_cfg.init_state.rot) - + tuple(rigid_object_cfg.init_state.lin_vel) - + 
tuple(rigid_object_cfg.init_state.ang_vel) - ) - default_object_state = ( - torch.tensor(default_object_state, dtype=torch.float, device=self.device) - .repeat(self.num_instances, 1) - .unsqueeze(1) - ) - default_object_states.append(default_object_state) - # concatenate the default state for each object - default_object_states = torch.cat(default_object_states, dim=1) - self._data.default_object_state = default_object_states - - def _env_obj_ids_to_view_ids( - self, env_ids: torch.Tensor, object_ids: Sequence[int] | slice | torch.Tensor - ) -> torch.Tensor: - """Converts environment and object indices to indices consistent with data from :attr:`root_physx_view`. - - Args: - env_ids: Environment indices. - object_ids: Object indices. - - Returns: - The view indices. - """ - # the order is env_0/object_0, env_0/object_1, env_0/object_..., env_1/object_0, env_1/object_1, ... - # return a flat tensor of indices - if isinstance(object_ids, slice): - object_ids = self._ALL_OBJ_INDICES - elif isinstance(object_ids, Sequence): - object_ids = torch.tensor(object_ids, device=self.device) - return (object_ids.unsqueeze(1) * self.num_instances + env_ids).flatten() - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._root_physx_view = None - - def _on_prim_deletion(self, prim_path: str) -> None: - """Invalidates and deletes the callbacks when the prim is deleted. - - Args: - prim_path: The path to the prim that is being deleted. - - Note: - This function is called when the prim is deleted. - """ - if prim_path == "/": - self._clear_callbacks() - return - for prim_path_expr in self._prim_paths: - result = re.match( - pattern="^" + "/".join(prim_path_expr.split("/")[: prim_path.count("/") + 1]) + "$", string=prim_path - ) - if result: - self._clear_callbacks() - return From 44160c86ed886416a5434da630dd558bf649dd4f Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 27 Jan 2026 16:43:06 +0100 Subject: [PATCH 04/38] added thin wrapper to allow usage of object_X mehtods on RigidObjectCollection --- .../rigid_object_collection.py | 181 ++++++++++++++++-- 1 file changed, 161 insertions(+), 20 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 908ca382f23..edc68661667 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -147,7 +147,7 @@ def permanent_wrench_composer(self) -> WrenchComposer: Operations. """ - def reset(self, env_ids: Sequence[int] | None = None, object_ids: slice | torch.Tensor | None = None) -> None: + def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.Tensor | None = None) -> None: """Resets all internal buffers of selected environments and objects. Args: @@ -236,7 +236,7 @@ def find_bodies( def write_body_state_to_sim( self, body_states: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the bodies state over selected environment indices into the simulation. 
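Note on the signature changes in this hunk and the ones that follow: env_ids and body_ids are now
typed as torch.Tensor indices rather than Python sequences. A minimal caller-side sketch of the
resulting pattern (illustrative only, not part of the patch; it assumes an already-initialized
RigidObjectCollection named ``cubes`` on a playing simulation):

    import torch

    # Tensor indices instead of Python lists (assumed: ``cubes`` is an initialized collection).
    env_ids = torch.tensor([0, 2, 5], dtype=torch.long, device=cubes.device)
    body_ids = torch.tensor([0, 1], dtype=torch.long, device=cubes.device)

    # Write the default state back for the selected environments and bodies.
    states = cubes.data.default_body_state[env_ids[:, None], body_ids].clone()
    cubes.write_body_state_to_sim(states, env_ids=env_ids, body_ids=body_ids)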
@@ -255,7 +255,7 @@ def write_body_state_to_sim( def write_body_com_state_to_sim( self, body_states: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body center of mass state over selected environment and body indices into the simulation. @@ -274,7 +274,7 @@ def write_body_com_state_to_sim( def write_body_link_state_to_sim( self, body_states: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body link state over selected environment and body indices into the simulation. @@ -293,7 +293,7 @@ def write_body_link_state_to_sim( def write_body_pose_to_sim( self, body_poses: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body poses over selected environment and body indices into the simulation. @@ -310,7 +310,7 @@ def write_body_pose_to_sim( def write_body_link_pose_to_sim( self, body_poses: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body link pose over selected environment and body indices into the simulation. @@ -362,7 +362,7 @@ def write_body_link_pose_to_sim( def write_body_com_pose_to_sim( self, body_poses: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body center of mass pose over selected environment and body indices into the simulation. @@ -407,7 +407,7 @@ def write_body_com_pose_to_sim( def write_body_velocity_to_sim( self, body_velocities: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body velocity over selected environment and body indices into the simulation. @@ -425,7 +425,7 @@ def write_body_velocity_to_sim( def write_body_com_velocity_to_sim( self, body_velocities: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body center of mass velocity over selected environment and body indices into the simulation. @@ -466,7 +466,7 @@ def write_body_com_velocity_to_sim( def write_body_link_velocity_to_sim( self, body_velocities: torch.Tensor, - env_ids: Sequence[int] | None = None, + env_ids: torch.Tensor | None = None, body_ids: slice | torch.Tensor | None = None, ) -> None: """Set the body link velocity over selected environment and body indices into the simulation. @@ -512,8 +512,8 @@ def write_body_link_velocity_to_sim( def set_masses( self, masses: torch.Tensor, - body_ids: Sequence[int] | None = None, - env_ids: Sequence[int] | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, ): """Set masses of all bodies. @@ -526,8 +526,8 @@ def set_masses( def set_coms( self, coms: torch.Tensor, - body_ids: Sequence[int] | None = None, - env_ids: Sequence[int] | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, ): """Set center of mass positions of all bodies. 
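The same tensor-index convention applies to the physical-property setters changed above
(set_masses, set_coms, set_inertias). A hedged sketch of scaling masses across all environments
(illustrative only; reuses the assumed ``cubes`` collection from the previous note):

    # Read current masses, apply a +/-20% multiplicative perturbation, and write them back.
    masses = cubes.data.body_mass.clone()
    masses *= torch.empty_like(masses).uniform_(0.8, 1.2)
    cubes.set_masses(masses)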
@@ -540,8 +540,8 @@ def set_coms( def set_inertias( self, inertias: torch.Tensor, - body_ids: Sequence[int] | None = None, - env_ids: Sequence[int] | None = None, + body_ids: torch.Tensor | None = None, + env_ids: torch.Tensor | None = None, ): """Set inertias of all bodies. @@ -556,8 +556,8 @@ def set_external_force_and_torque( forces: torch.Tensor, torques: torch.Tensor, positions: torch.Tensor | None = None, - body_ids: Sequence[int] | slice | None = None, - env_ids: Sequence[int] | None = None, + body_ids: torch.Tensor | slice | None = None, + env_ids: torch.Tensor | None = None, is_global: bool = False, ) -> None: """Set external force and torque to apply on the rigid object collection's bodies in their local frame. @@ -794,7 +794,7 @@ def _process_cfg(self) -> None: self.data.default_body_vel = default_body_vels def _env_body_ids_to_view_ids( - self, env_ids: torch.Tensor, body_ids: Sequence[int] | slice | torch.Tensor + self, env_ids: torch.Tensor, body_ids: torch.Tensor | slice | torch.Tensor ) -> torch.Tensor: """Converts environment and body indices to indices consistent with data from :attr:`root_physx_view`. @@ -842,4 +842,145 @@ def _on_prim_deletion(self, prim_path: str) -> None: ) if result: self._clear_callbacks() - return \ No newline at end of file + return + + @property + def num_objects(self) -> int: + """Deprecated property. Please use :attr:`num_bodies` instead.""" + logger.warning( + "The `num_objects` property will be deprecated in a future release. Please use `num_bodies` instead." + ) + return self.num_bodies + + @property + def object_names(self) -> list[str]: + """Deprecated property. Please use :attr:`body_names` instead.""" + logger.warning( + "The `object_names` property will be deprecated in a future release. Please use `body_names` instead." + ) + return self.body_names + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + logger.warning( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." + ) + return self.root_view + + def write_object_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_state_to_sim` instead.""" + logger.warning( + "The `write_object_state_to_sim` method will be deprecated in a future release. Please use" + " `write_body_state_to_sim` instead." + ) + self.write_body_state_to_sim(object_state, env_ids=env_ids, body_ids=object_ids) + + def write_object_com_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_com_state_to_sim` instead.""" + logger.warning( + "The `write_object_com_state_to_sim` method will be deprecated in a future release. Please use" + " `write_body_com_state_to_sim` instead." + ) + self.write_body_com_state_to_sim(object_state, env_ids=env_ids, body_ids=object_ids) + + def write_object_link_state_to_sim( + self, + object_state: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Deprecated method. Please use :meth:`write_body_link_state_to_sim` instead.""" + logger.warning( + "The `write_object_link_state_to_sim` method will be deprecated in a future release. Please use" + " `write_body_link_state_to_sim` instead." 
+ ) + self.write_body_link_state_to_sim(object_state, env_ids=env_ids, body_ids=object_ids) + + def write_object_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ): + """Deprecated method. Please use :meth:`write_body_pose_to_sim` instead.""" + logger.warning( + "The `write_object_pose_to_sim` method will be deprecated in a future release. Please use" + " `write_body_pose_to_sim` instead." + ) + self.write_body_pose_to_sim(object_pose, env_ids=env_ids, body_ids=object_ids) + + def write_object_link_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_link_pose_to_sim` instead.""" + logger.warning( + "The `write_object_link_pose_to_sim` method will be deprecated in a future release. Please use" + " `write_body_link_pose_to_sim` instead." + ) + self.write_body_link_pose_to_sim(object_pose, env_ids=env_ids, body_ids=object_ids) + + def write_object_com_pose_to_sim( + self, + object_pose: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_com_pose_to_sim` instead.""" + logger.warning( + "The `write_object_com_pose_to_sim` method will be deprecated in a future release. Please use" + " `write_body_com_pose_to_sim` instead." + ) + self.write_body_com_pose_to_sim(object_pose, env_ids=env_ids, body_ids=object_ids) + + def write_object_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_com_velocity_to_sim` instead.""" + logger.warning( + "The `write_object_velocity_to_sim` method will be deprecated in a future release. Please use" + " `write_body_com_velocity_to_sim` instead." + ) + self.write_body_com_velocity_to_sim(object_velocity, env_ids=env_ids, body_ids=object_ids) + + def write_object_com_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_com_velocity_to_sim` instead.""" + logger.warning( + "The `write_object_com_velocity_to_sim` method will be deprecated in a future release. Please use" + " `write_body_com_velocity_to_sim` instead." + ) + self.write_body_com_velocity_to_sim(object_velocity, env_ids=env_ids, body_ids=object_ids) + + def write_object_link_velocity_to_sim( + self, + object_velocity: torch.Tensor, + env_ids: torch.Tensor | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Deprecated method. Please use :meth:`write_body_link_velocity_to_sim` instead.""" + logger.warning( + "The `write_object_link_velocity_to_sim` method will be deprecated in a future release. Please use" + " `write_body_link_velocity_to_sim` instead." + ) + self.write_body_link_velocity_to_sim(object_velocity, env_ids=env_ids, body_ids=object_ids) From 1e6f4f07c69e1ce187f88329404bed54734b3a55 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 09:18:32 +0100 Subject: [PATCH 05/38] All tests for the assets are passing. 
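For downstream code, the object_* accessors introduced as thin wrappers in the previous patch
forward to the body_* API and log a deprecation warning. A minimal before/after sketch
(illustrative only; ``cubes`` is an assumed, already-initialized RigidObjectCollection):

    # Before: still works, but emits a deprecation warning through the thin wrappers.
    state = cubes.data.default_object_state.clone()
    cubes.write_object_state_to_sim(state)
    view = cubes.root_physx_view

    # After: preferred names in the isaaclab_physx backend.
    state = cubes.data.default_body_state.clone()
    cubes.write_body_state_to_sim(state)
    view = cubes.root_view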
--- .../rigid_object/base_rigid_object_data.py | 4 +- .../base_rigid_object_collection.py | 2 +- .../base_rigid_object_collection_data.py | 68 +++++++++---------- source/isaaclab/isaaclab/envs/mdp/events.py | 23 +++++-- .../isaaclab/envs/utils/io_descriptors.py | 8 +-- .../assets/articulation/articulation.py | 14 +++- .../assets/rigid_object/rigid_object.py | 12 +++- .../assets/rigid_object/rigid_object_data.py | 2 + .../rigid_object_collection.py | 65 ++++++++++-------- .../rigid_object_collection_data.py | 37 ++++++++-- .../test/assets/test_articulation.py | 25 +++---- .../test/assets/test_deformable_object.py | 0 .../test/assets/test_rigid_object.py | 7 +- .../assets/test_rigid_object_collection.py | 25 +++---- .../test/assets/test_surface_gripper.py | 0 15 files changed, 181 insertions(+), 111 deletions(-) rename source/{isaaclab => isaaclab_physx}/test/assets/test_articulation.py (98%) rename source/{isaaclab => isaaclab_physx}/test/assets/test_deformable_object.py (100%) rename source/{isaaclab => isaaclab_physx}/test/assets/test_rigid_object.py (99%) rename source/{isaaclab => isaaclab_physx}/test/assets/test_rigid_object_collection.py (98%) rename source/{isaaclab => isaaclab_physx}/test/assets/test_surface_gripper.py (100%) diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 5aa7c26c065..fe05cfc4d7b 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -232,7 +232,7 @@ def body_link_state_w(self) -> torch.Tensor: @abstractmethod def body_com_state_w(self) -> torch.Tensor: """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, num_bodies, 13). + Shape is (num_instances, 1, 13). The position, quaternion, and linear/angular velocity are of the body's center of mass frame relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the orientation of the @@ -270,7 +270,7 @@ def body_mass(self) -> torch.Tensor: @property @abstractmethod def body_inertia(self) -> torch.Tensor: - """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 9).""" raise NotImplementedError() ## diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index bf444bacdf2..f32858a0296 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -53,7 +53,7 @@ def __init__(self, cfg: RigidObjectCollectionCfg): Args: cfg: A configuration instance. 
""" - super().__init__(cfg) + pass """ Properties diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index a1464ebf17d..785e9579de4 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -62,8 +62,8 @@ def update(self, dt: float) -> None: @property @abstractmethod - def default_root_pose(self) -> torch.Tensor: - """Default root pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, 7). + def default_body_pose(self) -> torch.Tensor: + """Default body pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, num_bodies, 7). The position and quaternion are of the rigid body's actor frame. """ @@ -71,8 +71,8 @@ def default_root_pose(self) -> torch.Tensor: @property @abstractmethod - def default_root_vel(self) -> torch.Tensor: - """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). + def default_body_vel(self) -> torch.Tensor: + """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 6). The linear and angular velocities are of the rigid body's center of mass frame. """ @@ -80,8 +80,8 @@ def default_root_vel(self) -> torch.Tensor: @property @abstractmethod - def default_root_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + def default_body_state(self) -> torch.Tensor: + """Default body state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 13). The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are of the center of mass frame. @@ -95,7 +95,7 @@ def default_root_state(self) -> torch.Tensor: @property @abstractmethod def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_bodies, 7). This quantity is the pose of the actor frame of the rigid body relative to the world. The orientation is provided in (x, y, z, w) format. @@ -105,7 +105,7 @@ def body_link_pose_w(self) -> torch.Tensor: @property @abstractmethod def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, num_bodies, 6). This quantity contains the linear and angular velocities of the actor frame of the root rigid body relative to the world. @@ -115,7 +115,7 @@ def body_link_vel_w(self) -> torch.Tensor: @property @abstractmethod def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_bodies, 7). This quantity is the pose of the center of mass frame of the rigid body relative to the world. The orientation is provided in (x, y, z, w) format. 
@@ -126,7 +126,7 @@ def body_com_pose_w(self) -> torch.Tensor: @abstractmethod def body_com_vel_w(self) -> torch.Tensor: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 6). + Shape is (num_instances, num_bodies, 6). This quantity contains the linear and angular velocities of the root rigid body's center of mass frame relative to the world. @@ -137,7 +137,7 @@ def body_com_vel_w(self) -> torch.Tensor: @abstractmethod def body_state_w(self) -> torch.Tensor: """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, 1, 13). + Shape is (num_instances, num_bodies, 13). The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular velocities are of the rigid bodies' center of mass frame. @@ -148,7 +148,7 @@ def body_state_w(self) -> torch.Tensor: @abstractmethod def body_link_state_w(self) -> torch.Tensor: """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 13). + Shape is (num_instances, num_bodies, 13). The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. The orientation is provided in (x, y, z, w) format. @@ -171,7 +171,7 @@ def body_com_state_w(self) -> torch.Tensor: @abstractmethod def body_com_acc_w(self) -> torch.Tensor: """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. - Shape is (num_instances, 1, 6). + Shape is (num_instances, num_bodies, 6). This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. """ @@ -181,7 +181,7 @@ def body_com_acc_w(self) -> torch.Tensor: @abstractmethod def body_com_pose_b(self) -> torch.Tensor: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). + Shape is (num_instances, num_bodies, 7). This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. The orientation is provided in (x, y, z, w) format. @@ -191,13 +191,13 @@ def body_com_pose_b(self) -> torch.Tensor: @property @abstractmethod def body_mass(self) -> torch.Tensor: - """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + """Mass of all bodies in the simulation world frame. Shape is (num_instances, num_bodies).""" raise NotImplementedError() @property @abstractmethod def body_inertia(self) -> torch.Tensor: - """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, num_bodies, 9).""" raise NotImplementedError() ## @@ -207,13 +207,13 @@ def body_inertia(self) -> torch.Tensor: @property @abstractmethod def projected_gravity_b(self) -> torch.Tensor: - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" + """Projection of the gravity direction on base frame. Shape is (num_instances, num_bodies, 3).""" raise NotImplementedError() @property @abstractmethod def heading_w(self) -> torch.Tensor: - """Yaw heading of the base frame (in radians). Shape is (num_instances,). + """Yaw heading of the base frame (in radians). Shape is (num_instances, num_bodies). 
Note: This quantity is computed by assuming that the forward-direction of the base @@ -224,7 +224,7 @@ def heading_w(self) -> torch.Tensor: @property @abstractmethod def body_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). + """Root link linear velocity in base frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. @@ -234,7 +234,7 @@ def body_link_lin_vel_b(self) -> torch.Tensor: @property @abstractmethod def body_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). + """Root link angular velocity in base world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. @@ -244,7 +244,7 @@ def body_link_ang_vel_b(self) -> torch.Tensor: @property @abstractmethod def body_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). + """Root center of mass linear velocity in base frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. @@ -254,7 +254,7 @@ def body_com_lin_vel_b(self) -> torch.Tensor: @property @abstractmethod def body_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + """Root center of mass angular velocity in base world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. @@ -268,7 +268,7 @@ def body_com_ang_vel_b(self) -> torch.Tensor: @property @abstractmethod def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the position of the rigid bodies' actor frame relative to the world. """ @@ -277,7 +277,7 @@ def body_link_pos_w(self) -> torch.Tensor: @property @abstractmethod def body_link_quat_w(self) -> torch.Tensor: - """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). + """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the rigid bodies' actor frame relative to the world. """ @@ -286,7 +286,7 @@ def body_link_quat_w(self) -> torch.Tensor: @property @abstractmethod def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. """ @@ -295,7 +295,7 @@ def body_link_lin_vel_w(self) -> torch.Tensor: @property @abstractmethod def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). 
This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. """ @@ -304,7 +304,7 @@ def body_link_ang_vel_w(self) -> torch.Tensor: @property @abstractmethod def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the position of the rigid bodies' actor frame. """ @@ -315,14 +315,14 @@ def body_com_pos_w(self) -> torch.Tensor: def body_com_quat_w(self) -> torch.Tensor: """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame. - Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the rigid bodies' actor frame. """ raise NotImplementedError() @property @abstractmethod def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the rigid bodies' center of mass frame. """ @@ -331,7 +331,7 @@ def body_com_lin_vel_w(self) -> torch.Tensor: @property @abstractmethod def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the rigid bodies' center of mass frame. """ @@ -340,7 +340,7 @@ def body_com_ang_vel_w(self) -> torch.Tensor: @property @abstractmethod def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear acceleration of the rigid bodies' center of mass frame. """ @@ -349,7 +349,7 @@ def body_com_lin_acc_w(self) -> torch.Tensor: @property @abstractmethod def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular acceleration of the rigid bodies' center of mass frame. """ @@ -359,7 +359,7 @@ def body_com_ang_acc_w(self) -> torch.Tensor: @abstractmethod def body_com_pos_b(self) -> torch.Tensor: """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, 1, 3). + Shape is (num_instances, num_bodies, 3). This quantity is the center of mass location relative to its body'slink frame. """ @@ -369,7 +369,7 @@ def body_com_pos_b(self) -> torch.Tensor: @abstractmethod def body_com_quat_b(self) -> torch.Tensor: """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, 1, 4). + respective link frames. Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the principles axes of inertia relative to its body's link frame. 
""" diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index fe76c887115..35e1f425f70 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -571,6 +571,10 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + self.default_joint_stiffness = self.asset.data.joint_stiffness.clone() + self.default_joint_damping = self.asset.data.joint_damping.clone() + # check for valid operation if cfg.params["operation"] == "scale": if "stiffness_distribution_params" in cfg.params: @@ -631,7 +635,7 @@ def randomize(data: torch.Tensor, params: tuple[float, float]) -> torch.Tensor: # Randomize stiffness if stiffness_distribution_params is not None: stiffness = actuator.stiffness[env_ids].clone() - stiffness[:, actuator_indices] = self.asset.data.default_joint_stiffness[env_ids][ + stiffness[:, actuator_indices] = self.default_joint_stiffness[env_ids][ :, global_indices ].clone() randomize(stiffness, stiffness_distribution_params) @@ -643,7 +647,7 @@ def randomize(data: torch.Tensor, params: tuple[float, float]) -> torch.Tensor: # Randomize damping if damping_distribution_params is not None: damping = actuator.damping[env_ids].clone() - damping[:, actuator_indices] = self.asset.data.default_joint_damping[env_ids][:, global_indices].clone() + damping[:, actuator_indices] = self.default_joint_damping[env_ids][:, global_indices].clone() randomize(damping, damping_distribution_params) actuator.damping[env_ids] = damping if isinstance(actuator, ImplicitActuator): @@ -684,6 +688,13 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # extract the used quantities (to enable type-hinting) self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] + + self.default_joint_friction_coeff = self.asset.data.joint_friction_coeff.clone() + self.default_dynamic_joint_friction_coeff = self.asset.data.joint_dynamic_friction_coeff.clone() + self.default_viscous_joint_friction_coeff = self.asset.data.joint_viscous_friction_coeff.clone() + self.default_joint_armature = self.asset.data.joint_armature.clone() + self.default_joint_pos_limits = self.asset.data.joint_pos_limits.clone() + # check for valid operation if cfg.params["operation"] == "scale": if "friction_distribution_params" in cfg.params: @@ -727,7 +738,7 @@ def __call__( # joint friction coefficient if friction_distribution_params is not None: friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_friction_coeff.clone(), + self.default_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -745,7 +756,7 @@ def __call__( if get_isaac_sim_version().major >= 5: # Randomize raw tensors dynamic_friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_dynamic_friction_coeff.clone(), + self.default_dynamic_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -753,7 +764,7 @@ def __call__( distribution=distribution, ) viscous_friction_coeff = _randomize_prop_by_op( - self.asset.data.default_joint_viscous_friction_coeff.clone(), + self.default_viscous_joint_friction_coeff.clone(), friction_distribution_params, env_ids, joint_ids, @@ -801,7 +812,7 @@ def __call__( # joint position limits if 
lower_limit_distribution_params is not None or upper_limit_distribution_params is not None: - joint_pos_limits = self.asset.data.default_joint_pos_limits.clone() + joint_pos_limits = self.default_joint_pos_limits.clone() # -- randomize the lower limits if lower_limit_distribution_params is not None: joint_pos_limits[..., 0] = _randomize_prop_by_op( diff --git a/source/isaaclab/isaaclab/envs/utils/io_descriptors.py b/source/isaaclab/isaaclab/envs/utils/io_descriptors.py index 84b8a5cf8a0..9b0f8bb509a 100644 --- a/source/isaaclab/isaaclab/envs/utils/io_descriptors.py +++ b/source/isaaclab/isaaclab/envs/utils/io_descriptors.py @@ -365,16 +365,16 @@ def export_articulations_data(env: ManagerBasedEnv) -> dict[str, dict[str, list[ articulation.data.default_joint_pos_limits[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_damping"] = ( - articulation.data.default_joint_damping[0].detach().cpu().numpy().tolist() + articulation.data.joint_damping[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_stiffness"] = ( - articulation.data.default_joint_stiffness[0].detach().cpu().numpy().tolist() + articulation.data.joint_stiffness[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_friction"] = ( - articulation.data.default_joint_friction[0].detach().cpu().numpy().tolist() + articulation.data.joint_friction_coeff[0].detach().cpu().numpy().tolist() ) articulation_joint_data[articulation_name]["default_joint_armature"] = ( - articulation.data.default_joint_armature[0].detach().cpu().numpy().tolist() + articulation.data.joint_armature[0].detach().cpu().numpy().tolist() ) return articulation_joint_data diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index 2d0ae4be5eb..22f8b80ba1f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -176,12 +176,12 @@ def root_view(self): @property def instantaneous_wrench_composer(self) -> WrenchComposer: """Instantaneous wrench composer for the articulation.""" - raise self._instantaneous_wrench_composer + return self._instantaneous_wrench_composer @property def permanent_wrench_composer(self) -> WrenchComposer: """Permanent wrench composer for the articulation.""" - raise self._permanent_wrench_composer + return self._permanent_wrench_composer """ Operations. @@ -2174,4 +2174,12 @@ def set_fixed_tendon_limit( "The function 'set_fixed_tendon_limit' will be deprecated in a future release. Please" " use 'set_fixed_tendon_position_limit' instead." ) - self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) \ No newline at end of file + self.set_fixed_tendon_position_limit(limit, fixed_tendon_ids=fixed_tendon_ids, env_ids=env_ids) + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + logger.warning( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." 
+ ) + return self.root_view \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 0ed4515165e..17a1d6ff73b 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -40,7 +40,7 @@ class RigidObject(BaseRigidObject): For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ applied to it. This API is used to define the simulation properties of the rigid body. On playing the simulation, the physics engine will automatically register the rigid body and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. .. note:: @@ -695,4 +695,12 @@ def _invalidate_initialize_callback(self, event): # call parent super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them - self._root_view = None \ No newline at end of file + self._root_view = None + + @property + def root_physx_view(self) -> physx.RigidBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + logger.warning( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." + ) + return self.root_view \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index f7b1e52441f..b71e9620faf 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -72,6 +72,8 @@ def __init__(self, root_view: physx.RigidBodyView, device: str): self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_view.count, 1) self.FORWARD_VEC_B = torch.tensor((1.0, 0.0, 0.0), device=self.device).repeat(self._root_view.count, 1) + self._create_buffers() + @property def is_primed(self) -> bool: """Whether the rigid object data is fully instantiated and ready to use.""" diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index edc68661667..ad693e1da40 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -40,7 +40,7 @@ class RigidObjectCollection(BaseRigidObjectCollection): For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the simulation, the physics engine will automatically register the rigid bodies and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. 
Rigid objects in the collection are uniquely identified via the key of the dictionary :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the @@ -65,7 +65,6 @@ def __init__(self, cfg: RigidObjectCollectionCfg): Args: cfg: A configuration instance. """ - super().__init__(cfg) # Note: We never call the parent constructor as it tries to call its own spawning which we don't want. # check that the config is valid cfg.validate() @@ -158,7 +157,7 @@ def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.T if env_ids is None: env_ids = self._ALL_ENV_INDICES if object_ids is None: - object_ids = self._ALL_OBJ_INDICES + object_ids = self._ALL_BODY_INDICES # reset external wrench self._instantaneous_wrench_composer.reset(env_ids) self._permanent_wrench_composer.reset(env_ids) @@ -177,24 +176,24 @@ def write_data_to_sim(self) -> None: self._instantaneous_wrench_composer.add_forces_and_torques( forces=self._permanent_wrench_composer.composed_force, torques=self._permanent_wrench_composer.composed_torque, - body_ids=self._ALL_OBJ_INDICES_WP, + body_ids=self._ALL_BODY_INDICES_WP, env_ids=self._ALL_ENV_INDICES_WP, ) # Apply both instantaneous and permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( + self.root_view.apply_forces_and_torques_at_position( force_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_force_as_torch), torque_data=self.reshape_data_to_view(self._instantaneous_wrench_composer.composed_torque_as_torch), position_data=None, - indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_BODY_INDICES), is_global=False, ) else: # Apply permanent wrench to the simulation - self.root_physx_view.apply_forces_and_torques_at_position( + self.root_view.apply_forces_and_torques_at_position( force_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_force_as_torch), torque_data=self.reshape_data_to_view(self._permanent_wrench_composer.composed_torque_as_torch), position_data=None, - indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_OBJ_INDICES), + indices=self._env_body_ids_to_view_ids(self._ALL_ENV_INDICES, self._ALL_BODY_INDICES), is_global=False, ) self._instantaneous_wrench_composer.reset() @@ -328,7 +327,7 @@ def write_body_link_pose_to_sim( env_ids = self._ALL_ENV_INDICES # -- body_ids if body_ids is None: - body_ids = self._ALL_OBJ_INDICES + body_ids = self._ALL_BODY_INDICES # note: we need to do this here since tensors are not set into simulation until step. # set into internal buffers @@ -381,7 +380,7 @@ def write_body_com_pose_to_sim( env_ids = self._ALL_ENV_INDICES # -- body_ids if body_ids is None: - body_ids = self._ALL_OBJ_INDICES + body_ids = self._ALL_BODY_INDICES # set into internal buffers self.data.body_com_pose_w[env_ids[:, None], body_ids] = body_poses.clone() @@ -444,7 +443,7 @@ def write_body_com_velocity_to_sim( env_ids = self._ALL_ENV_INDICES # -- body_ids if body_ids is None: - body_ids = self._ALL_OBJ_INDICES + body_ids = self._ALL_BODY_INDICES # note: we need to do this here since tensors are not set into simulation until step. 
# set into internal buffers @@ -485,7 +484,7 @@ def write_body_link_velocity_to_sim( env_ids = self._ALL_ENV_INDICES # -- body_ids if body_ids is None: - body_ids = self._ALL_OBJ_INDICES + body_ids = self._ALL_BODY_INDICES # set into internal buffers self.data.body_link_vel_w[env_ids[:, None], body_ids] = body_velocities.clone() @@ -620,7 +619,7 @@ def set_external_force_and_torque( env_ids = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) # -- body_ids if body_ids is None: - body_ids = self._ALL_OBJ_INDICES_WP + body_ids = self._ALL_BODY_INDICES_WP elif isinstance(body_ids, slice): body_ids = wp.from_torch( torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 @@ -645,27 +644,27 @@ def set_external_force_and_torque( """ def reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data coming from the :attr:`root_physx_view` to - (num_instances, num_objects, data_dim). + """Reshapes and arranges the data coming from the :attr:`root_view` to + (num_instances, num_bodies, data_dim). Args: - data: The data coming from the :attr:`root_physx_view`. Shape is (num_instances * num_objects, data_dim). + data: The data coming from the :attr:`root_view`. Shape is (num_instances * num_bodies, data_dim). Returns: - The reshaped data. Shape is (num_instances, num_objects, data_dim). + The reshaped data. Shape is (num_instances, num_bodies, data_dim). """ - return torch.einsum("ijk -> jik", data.reshape(self.num_objects, self.num_instances, -1)) + return torch.einsum("ijk -> jik", data.reshape(self.num_bodies, self.num_instances, -1)) def reshape_data_to_view(self, data: torch.Tensor) -> torch.Tensor: - """Reshapes and arranges the data to the be consistent with data from the :attr:`root_physx_view`. + """Reshapes and arranges the data to the be consistent with data from the :attr:`root_view`. Args: - data: The data to be reshaped. Shape is (num_instances, num_objects, data_dim). + data: The data to be reshaped. Shape is (num_instances, num_bodies, data_dim). Returns: - The reshaped data. Shape is (num_instances * num_objects, data_dim). + The reshaped data. Shape is (num_instances * num_bodies, data_dim). """ - return torch.einsum("ijk -> jik", data).reshape(self.num_objects * self.num_instances, *data.shape[2:]) + return torch.einsum("ijk -> jik", data).reshape(self.num_bodies * self.num_instances, *data.shape[2:]) """ Internal helper. @@ -677,7 +676,7 @@ def _initialize_impl(self): # obtain global simulation view self._physics_sim_view = SimulationManager.get_physics_sim_view() root_prim_path_exprs = [] - for name, rigid_body_cfg in self.cfg.rigid_object_cfg.items(): + for name, rigid_body_cfg in self.cfg.rigid_objects.items(): # obtain the first prim in the regex expression (all others are assumed to be a copy of this) template_prim = sim_utils.find_first_matching_prim(rigid_body_cfg.prim_path) if template_prim is None: @@ -725,10 +724,10 @@ def _initialize_impl(self): self._body_names_list.append(name) # -- object view - self._root_physx_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) + self._root_view = self._physics_sim_view.create_rigid_body_view(root_prim_path_exprs) # check if the rigid body was created - if self._root_physx_view._backend is None: + if self._root_view._backend is None: raise RuntimeError("Failed to create rigid body collection. 
Please check PhysX logs.") # log information about the rigid body @@ -737,7 +736,7 @@ def _initialize_impl(self): logger.info(f"Body names: {self.body_names}") # container for data access - self._data = RigidObjectCollectionData(self.root_physx_view, self.num_bodies, self.device) + self._data = RigidObjectCollectionData(self.root_view, self.num_bodies, self.device) # create buffers self._create_buffers() @@ -796,7 +795,7 @@ def _process_cfg(self) -> None: def _env_body_ids_to_view_ids( self, env_ids: torch.Tensor, body_ids: torch.Tensor | slice | torch.Tensor ) -> torch.Tensor: - """Converts environment and body indices to indices consistent with data from :attr:`root_physx_view`. + """Converts environment and body indices to indices consistent with data from :attr:`root_view`. Args: env_ids: Environment indices. @@ -848,7 +847,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: def num_objects(self) -> int: """Deprecated property. Please use :attr:`num_bodies` instead.""" logger.warning( - "The `num_objects` property will be deprecated in a future release. Please use `num_bodies` instead." + "The `num_bodies` property will be deprecated in a future release. Please use `num_bodies` instead." ) return self.num_bodies @@ -984,3 +983,13 @@ def write_object_link_velocity_to_sim( " `write_body_link_velocity_to_sim` instead." ) self.write_body_link_velocity_to_sim(object_velocity, env_ids=env_ids, body_ids=object_ids) + + def find_objects( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str], list[int]]: + """Deprecated method. Please use :meth:`find_bodies` instead.""" + logger.warning( + "The `find_objects` method will be deprecated in a future release. Please use" + " `find_bodies` instead." + ) + return self.find_bodies(name_keys, preserve_order) \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index df3b6432fea..9b51d65b72f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -78,6 +78,8 @@ def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str) self.num_instances, self.num_bodies, 1 ) + self._create_buffers() + @property def is_primed(self) -> bool: """Whether the rigid object collection data is fully instantiated and ready to use.""" @@ -125,7 +127,7 @@ def default_body_pose(self) -> torch.Tensor: The position and quaternion are of the rigid body's actor frame. """ - return self._default_root_pose + return self._default_body_pose @default_body_pose.setter def default_body_pose(self, value: torch.Tensor) -> None: @@ -172,7 +174,7 @@ def default_body_state(self) -> torch.Tensor: of the center of mass frame. """ logger.warning("Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. 
Please use the default_body_pose and default_body_vel properties instead.") - return torch.cat([self.default_body_pose, self.default_body_vel], dim=1) + return torch.cat([self.default_body_pose, self.default_body_vel], dim=-1) @default_body_state.setter def default_body_state(self, value: torch.Tensor) -> None: @@ -340,12 +342,12 @@ def body_com_pose_b(self) -> torch.Tensor: @property def body_mass(self) -> torch.Tensor: """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" - return self._body_mass + return self._reshape_view_to_data(self._body_mass) @property def body_inertia(self) -> torch.Tensor: """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" - return self._body_inertia + return self._reshape_view_to_data(self._body_inertia) """ Derived Properties. @@ -532,6 +534,33 @@ def _create_buffers(self) -> None: Properties for backwards compatibility. """ + @property + def default_object_pose(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_body_pose` instead.""" + logger.warning( + "The `default_object_pose` property will be deprecated in a future release. Please use" + " `default_body_pose` instead." + ) + return self.default_body_pose + + @property + def default_object_vel(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_body_vel` instead.""" + logger.warning( + "The `default_object_vel` property will be deprecated in a future release. Please use" + " `default_body_vel` instead." + ) + return self.default_body_vel + + @property + def default_object_state(self) -> torch.Tensor: + """Deprecated property. Please use :attr:`default_body_state` instead.""" + logger.warning( + "The `default_object_state` property will be deprecated in a future release. Please use" + " `default_body_state` instead." + ) + return self.default_body_state + @property def object_link_pose_w(self): """Deprecated property. 
Please use :attr:`body_link_pose_w` instead.""" diff --git a/source/isaaclab/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py similarity index 98% rename from source/isaaclab/test/assets/test_articulation.py rename to source/isaaclab_physx/test/assets/test_articulation.py index 9a983ab34c1..12bdd5c0651 100644 --- a/source/isaaclab/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -26,7 +26,8 @@ import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils from isaaclab.actuators import ActuatorBase, IdealPDActuatorCfg, ImplicitActuatorCfg -from isaaclab.assets import Articulation, ArticulationCfg +from isaaclab_physx.assets import Articulation +from isaaclab.assets import ArticulationCfg from isaaclab.envs.mdp.terminations import joint_effort_out_of_limit from isaaclab.managers import SceneEntityCfg from isaaclab.sim import build_simulation_context @@ -290,8 +291,8 @@ def test_initialization_floating_base(sim, num_articulations, device, add_ground assert articulation.data.root_pos_w.shape == (num_articulations, 3) assert articulation.data.root_quat_w.shape == (num_articulations, 4) assert articulation.data.joint_pos.shape == (num_articulations, 12) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.body_mass.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related @@ -347,8 +348,8 @@ def test_initialization_fixed_base(sim, num_articulations, device): assert articulation.data.root_pos_w.shape == (num_articulations, 3) assert articulation.data.root_quat_w.shape == (num_articulations, 4) assert articulation.data.joint_pos.shape == (num_articulations, 9) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.body_mass.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related @@ -411,8 +412,8 @@ def test_initialization_fixed_base_single_joint(sim, num_articulations, device, assert articulation.data.root_pos_w.shape == (num_articulations, 3) assert articulation.data.root_quat_w.shape == (num_articulations, 4) assert articulation.data.joint_pos.shape == (num_articulations, 1) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.body_mass.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related @@ -474,8 +475,8 @@ def test_initialization_hand_with_tendons(sim, num_articulations, device): assert articulation.data.root_pos_w.shape == (num_articulations, 3) assert articulation.data.root_quat_w.shape == (num_articulations, 4) assert articulation.data.joint_pos.shape == (num_articulations, 24) - assert 
articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.body_mass.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.shape == (num_articulations, articulation.num_bodies, 9) # Check some internal physx data for debugging # -- joint related @@ -1860,7 +1861,7 @@ def test_body_incoming_joint_wrench_b_single_joint(sim, num_articulations, devic assert articulation.data.body_incoming_joint_wrench_b.shape == (num_articulations, articulation.num_bodies, 6) # calculate expected static - mass = articulation.data.default_mass + mass = articulation.data.body_mass.to("cpu") pos_w = articulation.data.body_pos_w quat_w = articulation.data.body_quat_w @@ -2050,8 +2051,8 @@ def test_spatial_tendons(sim, num_articulations, device): assert articulation.data.root_pos_w.shape == (num_articulations, 3) assert articulation.data.root_quat_w.shape == (num_articulations, 4) assert articulation.data.joint_pos.shape == (num_articulations, 3) - assert articulation.data.default_mass.shape == (num_articulations, articulation.num_bodies) - assert articulation.data.default_inertia.shape == (num_articulations, articulation.num_bodies, 9) + assert articulation.data.body_mass.shape == (num_articulations, articulation.num_bodies) + assert articulation.data.body_inertia.shape == (num_articulations, articulation.num_bodies, 9) assert articulation.num_spatial_tendons == 1 articulation.set_spatial_tendon_stiffness(torch.tensor([10.0])) diff --git a/source/isaaclab/test/assets/test_deformable_object.py b/source/isaaclab_physx/test/assets/test_deformable_object.py similarity index 100% rename from source/isaaclab/test/assets/test_deformable_object.py rename to source/isaaclab_physx/test/assets/test_deformable_object.py diff --git a/source/isaaclab/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py similarity index 99% rename from source/isaaclab/test/assets/test_rigid_object.py rename to source/isaaclab_physx/test/assets/test_rigid_object.py index 8de5361e29f..cfe81175033 100644 --- a/source/isaaclab/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -24,7 +24,8 @@ from flaky import flaky import isaaclab.sim as sim_utils -from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab_physx.assets import RigidObject +from isaaclab.assets import RigidObjectCfg from isaaclab.sim import build_simulation_context from isaaclab.sim.spawners import materials from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, ISAACLAB_NUCLEUS_DIR @@ -118,8 +119,8 @@ def test_initialization(num_cubes, device): # Check buffers that exists and have correct shapes assert cube_object.data.root_pos_w.shape == (num_cubes, 3) assert cube_object.data.root_quat_w.shape == (num_cubes, 4) - assert cube_object.data.default_mass.shape == (num_cubes, 1) - assert cube_object.data.default_inertia.shape == (num_cubes, 9) + assert cube_object.data.body_mass.shape == (num_cubes, 1) + assert cube_object.data.body_inertia.shape == (num_cubes, 9) # Simulate physics for _ in range(2): diff --git a/source/isaaclab/test/assets/test_rigid_object_collection.py b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py similarity index 98% rename from source/isaaclab/test/assets/test_rigid_object_collection.py rename to source/isaaclab_physx/test/assets/test_rigid_object_collection.py 
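The renamed tests below also change their imports: the concrete asset classes now come from the
isaaclab_physx extension while the configuration classes stay in isaaclab. A short sketch of the
new import split (illustrative only, mirroring the import hunks in the renamed test files):

    from isaaclab.assets import RigidObjectCfg, RigidObjectCollectionCfg
    from isaaclab_physx.assets import RigidObject, RigidObjectCollection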
index 8d919bf4d7a..5fbc82cc0dd 100644 --- a/source/isaaclab/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py @@ -22,7 +22,8 @@ import torch import isaaclab.sim as sim_utils -from isaaclab.assets import RigidObjectCfg, RigidObjectCollection, RigidObjectCollectionCfg +from isaaclab_physx.assets import RigidObjectCollection +from isaaclab.assets import RigidObjectCollectionCfg, RigidObjectCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from isaaclab.utils.math import ( @@ -126,8 +127,8 @@ def test_initialization(sim, num_envs, num_cubes, device): # Check buffers that exist and have correct shapes assert object_collection.data.object_link_pos_w.shape == (num_envs, num_cubes, 3) assert object_collection.data.object_link_quat_w.shape == (num_envs, num_cubes, 4) - assert object_collection.data.default_mass.shape == (num_envs, num_cubes, 1) - assert object_collection.data.default_inertia.shape == (num_envs, num_cubes, 9) + assert object_collection.data.body_mass.shape == (num_envs, num_cubes, 1) + assert object_collection.data.body_inertia.shape == (num_envs, num_cubes, 9) # Simulate physics for _ in range(2): @@ -150,20 +151,20 @@ def test_id_conversion(sim, device): torch.tensor([1, 3, 5], device=device, dtype=torch.long), ] - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES, object_collection._ALL_OBJ_INDICES[None, 2] + view_ids = object_collection._env_body_ids_to_view_ids( + object_collection._ALL_ENV_INDICES, object_collection._ALL_BODY_INDICES[None, 2] ) assert (view_ids == expected[0]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_OBJ_INDICES[None, 2] + view_ids = object_collection._env_body_ids_to_view_ids( + object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_BODY_INDICES[None, 2] ) assert (view_ids == expected[1]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_OBJ_INDICES + view_ids = object_collection._env_body_ids_to_view_ids( + object_collection._ALL_ENV_INDICES[None, 0], object_collection._ALL_BODY_INDICES ) assert (view_ids == expected[2]).all() - view_ids = object_collection._env_obj_ids_to_view_ids( - object_collection._ALL_ENV_INDICES[None, 1], object_collection._ALL_OBJ_INDICES + view_ids = object_collection._env_body_ids_to_view_ids( + object_collection._ALL_ENV_INDICES[None, 1], object_collection._ALL_BODY_INDICES ) assert (view_ids == expected[3]).all() @@ -281,7 +282,7 @@ def test_external_force_on_single_body(sim, num_envs, num_cubes, device): # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(object_collection.num_instances, len(object_ids), 6, device=sim.device) # Every 2nd cube should have a force applied to it - external_wrench_b[:, 0::2, 2] = 9.81 * object_collection.data.default_mass[:, 0::2, 0] + external_wrench_b[:, 0::2, 2] = 9.81 * object_collection.data.body_mass[:, 0::2, 0] for i in range(5): # reset object state diff --git a/source/isaaclab/test/assets/test_surface_gripper.py b/source/isaaclab_physx/test/assets/test_surface_gripper.py similarity index 100% rename from source/isaaclab/test/assets/test_surface_gripper.py rename to source/isaaclab_physx/test/assets/test_surface_gripper.py From 39be13677d7f0fb82bb41a736b335f81db96df54 Mon Sep 17 00:00:00 2001 From: Antoine Richard 
Date: Wed, 28 Jan 2026 09:53:58 +0100 Subject: [PATCH 06/38] Doc + test update --- docs/conf.py | 2 + docs/source/api/index.rst | 12 + docs/source/api/lab/isaaclab.assets.rst | 25 +- docs/source/api/lab_physx/isaaclab.assets.rst | 131 +++++++++ .../migration/migrating_to_isaaclab_3-0.rst | 253 ++++++++++++++++++ source/isaaclab/docs/CHANGELOG.rst | 69 +++++ .../assets/rigid_object/base_rigid_object.py | 2 +- .../base_rigid_object_collection.py | 2 +- source/isaaclab_physx/docs/CHANGELOG.rst | 85 +++++- .../deformable_object/deformable_object.py | 34 ++- .../deformable_object_data.py | 28 +- .../test/assets/test_articulation.py | 66 ++--- .../test/assets/test_deformable_object.py | 4 +- .../test/assets/test_rigid_object.py | 40 +-- .../assets/test_rigid_object_collection.py | 22 +- 15 files changed, 654 insertions(+), 121 deletions(-) create mode 100644 docs/source/api/lab_physx/isaaclab.assets.rst create mode 100644 docs/source/migration/migrating_to_isaaclab_3-0.rst diff --git a/docs/conf.py b/docs/conf.py index 248e14c3f89..676b9307399 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,6 +24,8 @@ sys.path.insert(0, os.path.abspath("../source/isaaclab_assets/isaaclab_assets")) sys.path.insert(0, os.path.abspath("../source/isaaclab_tasks")) sys.path.insert(0, os.path.abspath("../source/isaaclab_tasks/isaaclab_tasks")) +sys.path.insert(0, os.path.abspath("../source/isaaclab_physx")) +sys.path.insert(0, os.path.abspath("../source/isaaclab_physx/isaaclab_physx")) sys.path.insert(0, os.path.abspath("../source/isaaclab_rl")) sys.path.insert(0, os.path.abspath("../source/isaaclab_rl/isaaclab_rl")) sys.path.insert(0, os.path.abspath("../source/isaaclab_mimic")) diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst index 6e0457d93e4..3d0e151cde5 100644 --- a/docs/source/api/index.rst +++ b/docs/source/api/index.rst @@ -94,3 +94,15 @@ It includes the following modules: :toctree: lab_tasks utils + +isaaclab_physx extension +------------------------ + +The following modules are available in the ``isaaclab_physx`` extension: + +.. currentmodule:: isaaclab_physx + +.. autosummary:: + :toctree: lab_physx + + assets \ No newline at end of file diff --git a/docs/source/api/lab/isaaclab.assets.rst b/docs/source/api/lab/isaaclab.assets.rst index c91066966e8..5d8e6699b4d 100644 --- a/docs/source/api/lab/isaaclab.assets.rst +++ b/docs/source/api/lab/isaaclab.assets.rst @@ -18,9 +18,6 @@ Articulation ArticulationData ArticulationCfg - DeformableObject - DeformableObjectData - DeformableObjectCfg .. currentmodule:: isaaclab.assets @@ -92,24 +89,4 @@ Articulation :members: :inherited-members: :show-inheritance: - :exclude-members: __init__, class_type - -Deformable Object ------------------ - -.. autoclass:: DeformableObject - :members: - :inherited-members: - :show-inheritance: - -.. autoclass:: DeformableObjectData - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__ - -.. autoclass:: DeformableObjectCfg - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__, class_type + :exclude-members: __init__, class_type \ No newline at end of file diff --git a/docs/source/api/lab_physx/isaaclab.assets.rst b/docs/source/api/lab_physx/isaaclab.assets.rst new file mode 100644 index 00000000000..dd5ee6148b7 --- /dev/null +++ b/docs/source/api/lab_physx/isaaclab.assets.rst @@ -0,0 +1,131 @@ +isaaclab_physx.assets +===================== + +.. automodule:: isaaclab_physx.assets + + .. rubric:: Classes + + .. 
autosummary:: + + AssetBase + AssetBaseCfg + RigidObject + RigidObjectData + RigidObjectCfg + RigidObjectCollection + RigidObjectCollectionData + RigidObjectCollectionCfg + Articulation + ArticulationData + ArticulationCfg + DeformableObject + DeformableObjectData + DeformableObjectCfg + SurfaceGripper + SurfaceGripperCfg + +.. currentmodule:: isaaclab_physx.assets + +Asset Base +---------- + +.. autoclass:: AssetBase + :members: + +.. autoclass:: AssetBaseCfg + :members: + :exclude-members: __init__, class_type, InitialStateCfg + +Rigid Object +------------ + +.. autoclass:: RigidObject + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: RigidObjectData + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__ + +.. autoclass:: RigidObjectCfg + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__, class_type + +Rigid Object Collection +----------------------- + +.. autoclass:: RigidObjectCollection + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: RigidObjectCollectionData + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__ + +.. autoclass:: RigidObjectCollectionCfg + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__, class_type + +Articulation +------------ + +.. autoclass:: Articulation + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: ArticulationData + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__ + +.. autoclass:: ArticulationCfg + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__, class_type + +Deformable Object +----------------- + +.. autoclass:: DeformableObject + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: DeformableObjectData + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__ + +.. autoclass:: DeformableObjectCfg + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__, class_type + +Surface Gripper +--------------- + +.. autoclass:: SurfaceGripper + :members: + :inherited-members: + :show-inheritance: + +.. autoclass:: SurfaceGripperCfg + :members: + :inherited-members: + :show-inheritance: + :exclude-members: __init__, class_type \ No newline at end of file diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst new file mode 100644 index 00000000000..4bc9194cf7f --- /dev/null +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -0,0 +1,253 @@ +.. _migrating-to-isaaclab-3-0: + +Migrating to Isaac Lab 3.0 +========================== + +.. currentmodule:: isaaclab + +Isaac Lab 3.0 introduces a multi-backend architecture that separates simulation backend-specific code +from the core Isaac Lab API. This allows for future support of different physics backends while +maintaining a consistent user-facing API. + +This guide covers the main breaking changes and deprecations you need to address when migrating +from Isaac Lab 2.x to Isaac Lab 3.0. + + +New ``isaaclab_physx`` Extension +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A new extension ``isaaclab_physx`` has been introduced to contain PhysX-specific implementations +of asset classes. 
The following classes have been moved to this new extension and must now be imported from +``isaaclab_physx.assets`` instead of ``isaaclab.assets``: + +- :class:`~isaaclab_physx.assets.DeformableObject`, :class:`~isaaclab_physx.assets.DeformableObjectCfg`, :class:`~isaaclab_physx.assets.DeformableObjectData` +- :class:`~isaaclab_physx.assets.SurfaceGripper`, :class:`~isaaclab_physx.assets.SurfaceGripperCfg` + +.. note:: + + The ``isaaclab_physx`` extension is installed automatically with Isaac Lab. No additional + installation steps are required. + + +Unchanged Imports +----------------- + +The following asset classes remain in the ``isaaclab`` package and can still be imported as before: + +- :class:`~isaaclab.assets.Articulation`, :class:`~isaaclab.assets.ArticulationCfg`, :class:`~isaaclab.assets.ArticulationData` +- :class:`~isaaclab.assets.RigidObject`, :class:`~isaaclab.assets.RigidObjectCfg`, :class:`~isaaclab.assets.RigidObjectData` +- :class:`~isaaclab.assets.RigidObjectCollection`, :class:`~isaaclab.assets.RigidObjectCollectionCfg`, :class:`~isaaclab.assets.RigidObjectCollectionData` + +These classes now inherit from new abstract base classes but maintain full backward compatibility. + + +``root_physx_view`` Deprecation +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``root_physx_view`` property has been deprecated on :class:`~isaaclab.assets.Articulation`, +:class:`~isaaclab.assets.RigidObject`, :class:`~isaaclab.assets.RigidObjectCollection`, and +:class:`~isaaclab_physx.assets.DeformableObject` in favor of the backend-agnostic ``root_view`` property. + ++---------------------------------------------+----------------------------------------+ +| Deprecated (2.x)                            | New (3.0)                              | ++=============================================+========================================+ +| ``articulation.root_physx_view``            | ``articulation.root_view``             | ++---------------------------------------------+----------------------------------------+ +| ``rigid_object.root_physx_view``            | ``rigid_object.root_view``             | ++---------------------------------------------+----------------------------------------+ +| ``rigid_object_collection.root_physx_view`` | ``rigid_object_collection.root_view``  | ++---------------------------------------------+----------------------------------------+ +| ``deformable_object.root_physx_view``       | ``deformable_object.root_view``        | ++---------------------------------------------+----------------------------------------+ + +.. 
note:: + + The ``root_view`` property returns the same underlying PhysX view object. This rename is part of + the multi-backend architecture to provide a consistent API across different physics backends. + The ``root_physx_view`` property will continue to work but will issue a deprecation warning. + + +RigidObjectCollection API Renaming +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :class:`~isaaclab_physx.assets.RigidObjectCollection` and +:class:`~isaaclab_physx.assets.RigidObjectCollectionData` classes have undergone an API rename +to provide consistency with other asset classes. The ``object_*`` naming convention has been +deprecated in favor of ``body_*``. + + +Method Renames +-------------- + +The following methods have been renamed. The old methods are deprecated and will be removed in a +future release: + ++------------------------------------------+------------------------------------------+ +| Deprecated (2.x) | New (3.0) | ++==========================================+==========================================+ +| ``write_object_state_to_sim()`` | ``write_body_state_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_link_state_to_sim()`` | ``write_body_link_state_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_pose_to_sim()`` | ``write_body_pose_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_link_pose_to_sim()`` | ``write_body_link_pose_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_com_pose_to_sim()`` | ``write_body_com_pose_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_velocity_to_sim()`` | ``write_body_com_velocity_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_com_velocity_to_sim()`` | ``write_body_com_velocity_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``write_object_link_velocity_to_sim()`` | ``write_body_link_velocity_to_sim()`` | ++------------------------------------------+------------------------------------------+ +| ``find_objects()`` | ``find_bodies()`` | ++------------------------------------------+------------------------------------------+ + + +Property Renames (Data Class) +----------------------------- + +The following properties on :class:`~isaaclab_physx.assets.RigidObjectCollectionData` have been +renamed. 
The old properties are deprecated and will be removed in a future release: + ++------------------------------------------+------------------------------------------+ +| Deprecated (2.x) | New (3.0) | ++==========================================+==========================================+ +| ``default_object_state`` | ``default_body_state`` | ++------------------------------------------+------------------------------------------+ +| ``object_names`` | ``body_names`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_pose_w`` | ``body_link_pose_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_vel_w`` | ``body_link_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_pose_w`` | ``body_com_pose_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_vel_w`` | ``body_com_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_state_w`` | ``body_state_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_state_w`` | ``body_link_state_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_state_w`` | ``body_com_state_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_acc_w`` | ``body_com_acc_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_pose_b`` | ``body_com_pose_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_pos_w`` | ``body_link_pos_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_quat_w`` | ``body_link_quat_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_lin_vel_w`` | ``body_link_lin_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_ang_vel_w`` | ``body_link_ang_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_pos_w`` | ``body_com_pos_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_quat_w`` | ``body_com_quat_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_lin_vel_w`` | ``body_com_lin_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_ang_vel_w`` | ``body_com_ang_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_lin_acc_w`` | ``body_com_lin_acc_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_ang_acc_w`` | ``body_com_ang_acc_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_pos_b`` | ``body_com_pos_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_quat_b`` | ``body_com_quat_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_link_lin_vel_b`` | ``body_link_lin_vel_b`` | 
++------------------------------------------+------------------------------------------+ +| ``object_link_ang_vel_b`` | ``body_link_ang_vel_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_lin_vel_b`` | ``body_com_lin_vel_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_com_ang_vel_b`` | ``body_com_ang_vel_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_pose_w`` | ``body_pose_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_pos_w`` | ``body_pos_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_quat_w`` | ``body_quat_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_vel_w`` | ``body_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_lin_vel_w`` | ``body_lin_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_ang_vel_w`` | ``body_ang_vel_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_lin_vel_b`` | ``body_lin_vel_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_ang_vel_b`` | ``body_ang_vel_b`` | ++------------------------------------------+------------------------------------------+ +| ``object_acc_w`` | ``body_acc_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_lin_acc_w`` | ``body_lin_acc_w`` | ++------------------------------------------+------------------------------------------+ +| ``object_ang_acc_w`` | ``body_ang_acc_w`` | ++------------------------------------------+------------------------------------------+ + +.. note:: + + All deprecated methods and properties will issue a deprecation warning when used. Your existing + code will continue to work, but you should migrate to the new API to avoid issues in future releases. + + +Migration Example +~~~~~~~~~~~~~~~~~ + +Here's a complete example showing how to update your code: + +**Before (Isaac Lab 2.x):** + +.. code-block:: python + + from isaaclab.assets import DeformableObject, DeformableObjectCfg + from isaaclab.assets import SurfaceGripper, SurfaceGripperCfg + from isaaclab.assets import RigidObjectCollection + + # Using deprecated root_physx_view + robot = scene["robot"] + masses = robot.root_physx_view.get_masses() + + # Using deprecated object_* API + collection = scene["object_collection"] + poses = collection.data.object_pose_w + collection.write_object_state_to_sim(state, env_ids=env_ids, object_ids=object_ids) + +**After (Isaac Lab 3.0):** + +.. 
code-block:: python + + from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg + from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg + from isaaclab.assets import RigidObjectCollection # unchanged + + # Using new root_view property + robot = scene["robot"] + masses = robot.root_view.get_masses() + + # Using new body_* API + collection = scene["object_collection"] + poses = collection.data.body_pose_w + collection.write_body_state_to_sim(state, env_ids=env_ids, body_ids=object_ids) + diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 407f4698188..ff56f74d069 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,75 @@ Changelog --------- +0.54.1 (2026-01-28) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab.assets.BaseArticulation` and :class:`~isaaclab.assets.BaseArticulationData` + abstract base classes that define the interface for articulation assets. These classes provide + a backend-agnostic API for articulation operations. +* Added :class:`~isaaclab.assets.BaseRigidObject` and :class:`~isaaclab.assets.BaseRigidObjectData` + abstract base classes that define the interface for rigid object assets. These classes provide + a backend-agnostic API for rigid object operations. +* Added :class:`~isaaclab.assets.BaseRigidObjectCollection` and :class:`~isaaclab.assets.BaseRigidObjectCollectionData` + abstract base classes that define the interface for rigid object collection assets. These classes + provide a backend-agnostic API for managing collections of rigid objects. +* Added :mod:`~isaaclab.utils.backend_utils` module with utilities for managing simulation backends. + +Changed +^^^^^^^ + +* Refactored the asset classes to follow a multi-backend architecture. The core :mod:`isaaclab.assets` + module now provides abstract base classes that define the interface, while backend-specific + implementations are provided in separate packages (e.g., ``isaaclab_physx``). +* The concrete :class:`~isaaclab.assets.Articulation`, :class:`~isaaclab.assets.RigidObject`, + and :class:`~isaaclab.assets.RigidObjectCollection` classes in the ``isaaclab`` package + now inherit from their respective base classes and rely on the backend-specific implementations + provided in the ``isaaclab_physx`` package for the default PhysX-based behavior. +* Moved :class:`DeformableObject`, :class:`DeformableObjectCfg`, and :class:`DeformableObjectData` + to the ``isaaclab_physx`` package since deformable bodies are specific to PhysX simulation. +* Moved :class:`SurfaceGripper` and :class:`SurfaceGripperCfg` to the ``isaaclab_physx`` package + since surface grippers rely on PhysX-specific contact APIs. + +Deprecated +^^^^^^^^^^ + +* Deprecated the ``root_physx_view`` property on :class:`~isaaclab.assets.Articulation`, + :class:`~isaaclab.assets.RigidObject`, and :class:`~isaaclab.assets.RigidObjectCollection` + in favor of the backend-agnostic ``root_view`` property. 
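+
+A minimal sketch for code that needs to run on both versions, where ``asset`` stands for any
+articulation, rigid object, or rigid object collection handle:
+
+.. code-block:: python
+
+   # Prefer the new backend-agnostic property; fall back to the 2.x name when it is absent.
+   root_view = asset.root_view if hasattr(asset, "root_view") else asset.root_physx_view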
+ +* Deprecated the ``object_*`` naming convention in :class:`~isaaclab.assets.RigidObjectCollection` + and :class:`~isaaclab.assets.RigidObjectCollectionData` in favor of ``body_*``: + + **RigidObjectCollection methods:** + + * ``write_object_state_to_sim()`` → use ``write_body_state_to_sim()`` + * ``write_object_link_state_to_sim()`` → use ``write_body_link_state_to_sim()`` + * ``write_object_pose_to_sim()`` → use ``write_body_pose_to_sim()`` + * ``write_object_link_pose_to_sim()`` → use ``write_body_link_pose_to_sim()`` + * ``write_object_com_pose_to_sim()`` → use ``write_body_com_pose_to_sim()`` + * ``write_object_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_com_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_link_velocity_to_sim()`` → use ``write_body_link_velocity_to_sim()`` + * ``find_objects()`` → use ``find_bodies()`` + + **RigidObjectCollectionData properties:** + + * ``default_object_state`` → use ``default_body_state`` + * ``object_names`` → use ``body_names`` + * ``object_pose_w``, ``object_pos_w``, ``object_quat_w`` → use ``body_pose_w``, ``body_pos_w``, ``body_quat_w`` + * ``object_vel_w``, ``object_lin_vel_w``, ``object_ang_vel_w`` → use ``body_vel_w``, ``body_lin_vel_w``, ``body_ang_vel_w`` + * ``object_acc_w``, ``object_lin_acc_w``, ``object_ang_acc_w`` → use ``body_acc_w``, ``body_lin_acc_w``, ``body_ang_acc_w`` + * And all other ``object_*`` properties (see :ref:`migrating-to-isaaclab-3-0` for complete list). + +Migration +^^^^^^^^^ + +* See :ref:`migrating-to-isaaclab-3-0` for detailed migration instructions. + + 0.54.0 (2026-01-13) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py index 7519e2f6c7e..c6268deea65 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py @@ -28,7 +28,7 @@ class BaseRigidObject(AssetBase): For an asset to be considered a rigid object, the root prim of the asset must have the `USD RigidBodyAPI`_ applied to it. This API is used to define the simulation properties of the rigid body. On playing the simulation, the physics engine will automatically register the rigid body and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. .. note:: diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index f32858a0296..55246ed5625 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -28,7 +28,7 @@ class BaseRigidObjectCollection(AssetBase): For each rigid body in the collection, the root prim of the asset must have the `USD RigidBodyAPI`_ applied to it. This API is used to define the simulation properties of the rigid bodies. On playing the simulation, the physics engine will automatically register the rigid bodies and create a corresponding - rigid body handle. This handle can be accessed using the :attr:`root_physx_view` attribute. + rigid body handle. This handle can be accessed using the :attr:`root_view` attribute. 
Rigid objects in the collection are uniquely identified via the key of the dictionary :attr:`~isaaclab.assets.RigidObjectCollectionCfg.rigid_objects` in the diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 2539cd9655d..82f53ffd249 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -1,10 +1,91 @@ Changelog --------- -0.1.0 (2026-01-26) +0.1.0 (2026-01-28) ~~~~~~~~~~~~~~~~~~~ +This is the initial release of the ``isaaclab_physx`` extension, which provides PhysX-specific +implementations of Isaac Lab asset classes. This extension enables a multi-backend architecture +where simulation backend-specific code is separated from the core Isaac Lab API. + Added ^^^^^ -* Added PhysX simulation interfaces for IsaacLab core package. \ No newline at end of file +* Added :mod:`isaaclab_physx.assets` module containing PhysX-specific asset implementations: + + * :class:`~isaaclab_physx.assets.Articulation` and :class:`~isaaclab_physx.assets.ArticulationData`: + PhysX-specific implementation for articulated rigid body systems (e.g., robots). Extends + :class:`~isaaclab.assets.BaseArticulation` with PhysX tensor API integration for efficient + GPU-accelerated simulation of multi-joint systems. + + * :class:`~isaaclab_physx.assets.RigidObject` and :class:`~isaaclab_physx.assets.RigidObjectData`: + PhysX-specific implementation for single rigid body assets. Extends + :class:`~isaaclab.assets.BaseRigidObject` with PhysX tensor API for efficient rigid body + state queries and writes. + + * :class:`~isaaclab_physx.assets.RigidObjectCollection` and :class:`~isaaclab_physx.assets.RigidObjectCollectionData`: + PhysX-specific implementation for collections of rigid objects. Extends + :class:`~isaaclab.assets.BaseRigidObjectCollection` with batched ``(env_ids, object_ids)`` + API for efficient multi-object state management. + + * :class:`~isaaclab_physx.assets.DeformableObject`, :class:`~isaaclab_physx.assets.DeformableObjectCfg`, + and :class:`~isaaclab_physx.assets.DeformableObjectData`: PhysX-specific implementation for + soft body simulation using finite element methods (FEM). Moved from ``isaaclab.assets``. + + * :class:`~isaaclab_physx.assets.SurfaceGripper` and :class:`~isaaclab_physx.assets.SurfaceGripperCfg`: + PhysX-specific implementation for surface gripper simulation using contact APIs. Moved from + ``isaaclab.assets``. + +* Added backward-compatible wrapper methods in :class:`~isaaclab_physx.assets.RigidObjectCollection` + and :class:`~isaaclab_physx.assets.RigidObjectCollectionData` that delegate to the new + ``body_*`` naming convention. + +Deprecated +^^^^^^^^^^ + +* Deprecated the ``root_physx_view`` property on :class:`~isaaclab_physx.assets.Articulation`, + :class:`~isaaclab_physx.assets.RigidObject`, :class:`~isaaclab_physx.assets.RigidObjectCollection`, + and :class:`~isaaclab_physx.assets.DeformableObject` in favor of the ``root_view`` property. + The ``root_physx_view`` property will be removed in a future release. + +* Deprecated the ``object_*`` naming convention in :class:`~isaaclab_physx.assets.RigidObjectCollection` + and :class:`~isaaclab_physx.assets.RigidObjectCollectionData` in favor of ``body_*``. 
The following + methods and properties are deprecated and will be removed in a future release: + + **RigidObjectCollection methods:** + + * ``write_object_state_to_sim()`` → use ``write_body_state_to_sim()`` + * ``write_object_link_state_to_sim()`` → use ``write_body_link_state_to_sim()`` + * ``write_object_pose_to_sim()`` → use ``write_body_pose_to_sim()`` + * ``write_object_link_pose_to_sim()`` → use ``write_body_link_pose_to_sim()`` + * ``write_object_com_pose_to_sim()`` → use ``write_body_com_pose_to_sim()`` + * ``write_object_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_com_velocity_to_sim()`` → use ``write_body_com_velocity_to_sim()`` + * ``write_object_link_velocity_to_sim()`` → use ``write_body_link_velocity_to_sim()`` + * ``find_objects()`` → use ``find_bodies()`` + + **RigidObjectCollectionData properties:** + + * ``default_object_state`` → use ``default_body_state`` + * ``object_names`` → use ``body_names`` + * ``object_link_pose_w`` → use ``body_link_pose_w`` + * ``object_link_vel_w`` → use ``body_link_vel_w`` + * ``object_com_pose_w`` → use ``body_com_pose_w`` + * ``object_com_vel_w`` → use ``body_com_vel_w`` + * ``object_state_w`` → use ``body_state_w`` + * ``object_link_state_w`` → use ``body_link_state_w`` + * ``object_com_state_w`` → use ``body_com_state_w`` + * ``object_com_acc_w`` → use ``body_com_acc_w`` + * ``object_pose_w`` → use ``body_pose_w`` + * ``object_pos_w`` → use ``body_pos_w`` + * ``object_quat_w`` → use ``body_quat_w`` + * ``object_vel_w`` → use ``body_vel_w`` + * ``object_lin_vel_w`` → use ``body_lin_vel_w`` + * ``object_ang_vel_w`` → use ``body_ang_vel_w`` + * ``object_acc_w`` → use ``body_acc_w`` + * And all other ``object_*`` properties (see :ref:`migrating-to-isaaclab-3-0` for complete list). + +Migration +^^^^^^^^^ + +* See :ref:`migrating-to-isaaclab-3-0` for detailed migration instructions. \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index ea03f402a12..5fe4bdf509c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -78,7 +78,7 @@ def data(self) -> DeformableObjectData: @property def num_instances(self) -> int: - return self.root_physx_view.count + return self.root_view.count @property def num_bodies(self) -> int: @@ -89,14 +89,22 @@ def num_bodies(self) -> int: return 1 @property - def root_physx_view(self) -> physx.SoftBodyView: - """Deformable body view for the asset (PhysX). + def root_view(self) -> physx.SoftBodyView: + """Deformable body view for the asset. Note: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_physx_view + @property + def root_physx_view(self) -> physx.SoftBodyView: + """Deprecated property. Please use :attr:`root_view` instead.""" + logger.warning( + "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." + ) + return self.root_view + @property def material_physx_view(self) -> physx.SoftBodyMaterialView | None: """Deformable material view for the asset (PhysX). 
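For user code, the rename shown in the hunk above is mechanical; a minimal sketch, assuming
``cube_object`` is an initialized ``DeformableObject`` handle::

    # Isaac Lab 2.x (still works in 3.0, but logs a deprecation warning)
    nodal_pos = cube_object.root_physx_view.get_sim_nodal_positions()

    # Isaac Lab 3.0: the same underlying PhysX view, accessed through the backend-agnostic property
    nodal_pos = cube_object.root_view.get_sim_nodal_positions()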
@@ -112,22 +120,22 @@ def material_physx_view(self) -> physx.SoftBodyMaterialView | None: @property def max_sim_elements_per_body(self) -> int: """The maximum number of simulation mesh elements per deformable body.""" - return self.root_physx_view.max_sim_elements_per_body + return self.root_view.max_sim_elements_per_body @property def max_collision_elements_per_body(self) -> int: """The maximum number of collision mesh elements per deformable body.""" - return self.root_physx_view.max_elements_per_body + return self.root_view.max_elements_per_body @property def max_sim_vertices_per_body(self) -> int: """The maximum number of simulation mesh vertices per deformable body.""" - return self.root_physx_view.max_sim_vertices_per_body + return self.root_view.max_sim_vertices_per_body @property def max_collision_vertices_per_body(self) -> int: """The maximum number of collision mesh vertices per deformable body.""" - return self.root_physx_view.max_vertices_per_body + return self.root_view.max_vertices_per_body """ Operations. @@ -183,7 +191,7 @@ def write_nodal_pos_to_sim(self, nodal_pos: torch.Tensor, env_ids: Sequence[int] # set into internal buffers self._data.nodal_pos_w[env_ids] = nodal_pos.clone() # set into simulation - self.root_physx_view.set_sim_nodal_positions(self._data.nodal_pos_w, indices=physx_env_ids) + self.root_view.set_sim_nodal_positions(self._data.nodal_pos_w, indices=physx_env_ids) def write_nodal_velocity_to_sim(self, nodal_vel: torch.Tensor, env_ids: Sequence[int] | None = None): """Set the nodal velocity over selected environment indices into the simulation. @@ -206,7 +214,7 @@ def write_nodal_velocity_to_sim(self, nodal_vel: torch.Tensor, env_ids: Sequence # set into internal buffers self._data.nodal_vel_w[env_ids] = nodal_vel.clone() # set into simulation - self.root_physx_view.set_sim_nodal_velocities(self._data.nodal_vel_w, indices=physx_env_ids) + self.root_view.set_sim_nodal_velocities(self._data.nodal_vel_w, indices=physx_env_ids) def write_nodal_kinematic_target_to_sim(self, targets: torch.Tensor, env_ids: Sequence[int] | None = None): """Set the kinematic targets of the simulation mesh for the deformable bodies indicated by the indices. @@ -231,7 +239,7 @@ def write_nodal_kinematic_target_to_sim(self, targets: torch.Tensor, env_ids: Se # store into internal buffers self._data.nodal_kinematic_target[env_ids] = targets.clone() # set into simulation - self.root_physx_view.set_sim_kinematic_targets(self._data.nodal_kinematic_target, indices=physx_env_ids) + self.root_view.set_sim_kinematic_targets(self._data.nodal_kinematic_target, indices=physx_env_ids) """ Operations - Helper. @@ -358,7 +366,7 @@ def _initialize_impl(self): logger.info("No deformable material found. 
Material properties will be set to default values.") # container for data access - self._data = DeformableObjectData(self.root_physx_view, self.device) + self._data = DeformableObjectData(self.root_view, self.device) # create buffers self._create_buffers() @@ -378,12 +386,12 @@ def _create_buffers(self): # default state # we use the initial nodal positions at spawn time as the default state # note: these are all in the simulation frame - nodal_positions = self.root_physx_view.get_sim_nodal_positions() + nodal_positions = self.root_view.get_sim_nodal_positions() nodal_velocities = torch.zeros_like(nodal_positions) self._data.default_nodal_state_w = torch.cat((nodal_positions, nodal_velocities), dim=-1) # kinematic targets - self._data.nodal_kinematic_target = self.root_physx_view.get_sim_kinematic_targets() + self._data.nodal_kinematic_target = self.root_view.get_sim_kinematic_targets() # set all nodes as non-kinematic targets by default self._data.nodal_kinematic_target[..., -1] = 1.0 diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py index bb7714383e0..a68a88e67c4 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py @@ -33,11 +33,11 @@ class DeformableObjectData: is older than the current simulation timestamp. The timestamp is updated whenever the data is updated. """ - def __init__(self, root_physx_view: physx.SoftBodyView, device: str): + def __init__(self, root_view: physx.SoftBodyView, device: str): """Initializes the deformable object data. Args: - root_physx_view: The root deformable body view of the object. + root_view: The root deformable body view of the object. device: The device used for processing. """ # Set the parameters @@ -45,7 +45,7 @@ def __init__(self, root_physx_view: physx.SoftBodyView, device: str): # Set the root deformable body view # note: this is stored as a weak reference to avoid circular references between the asset class # and the data container. This is important to avoid memory leaks. - self._root_physx_view: physx.SoftBodyView = weakref.proxy(root_physx_view) + self._root_view: physx.SoftBodyView = weakref.proxy(root_view) # Set initial time stamp self._sim_timestamp = 0.0 @@ -105,7 +105,7 @@ def update(self, dt: float): def nodal_pos_w(self): """Nodal positions in simulation world frame. Shape is (num_instances, max_sim_vertices_per_body, 3).""" if self._nodal_pos_w.timestamp < self._sim_timestamp: - self._nodal_pos_w.data = self._root_physx_view.get_sim_nodal_positions() + self._nodal_pos_w.data = self._root_view.get_sim_nodal_positions() self._nodal_pos_w.timestamp = self._sim_timestamp return self._nodal_pos_w.data @@ -113,7 +113,7 @@ def nodal_pos_w(self): def nodal_vel_w(self): """Nodal velocities in simulation world frame. 
Shape is (num_instances, max_sim_vertices_per_body, 3).""" if self._nodal_vel_w.timestamp < self._sim_timestamp: - self._nodal_vel_w.data = self._root_physx_view.get_sim_nodal_velocities() + self._nodal_vel_w.data = self._root_view.get_sim_nodal_velocities() self._nodal_vel_w.timestamp = self._sim_timestamp return self._nodal_vel_w.data @@ -136,7 +136,7 @@ def sim_element_quat_w(self): """ if self._sim_element_quat_w.timestamp < self._sim_timestamp: # convert from xyzw to wxyz - quats = self._root_physx_view.get_sim_element_rotations().view(self._root_physx_view.count, -1, 4) + quats = self._root_view.get_sim_element_rotations().view(self._root_view.count, -1, 4) quats = math_utils.convert_quat(quats, to="wxyz") # set the buffer data and timestamp self._sim_element_quat_w.data = quats @@ -152,7 +152,7 @@ def collision_element_quat_w(self): """ if self._collision_element_quat_w.timestamp < self._sim_timestamp: # convert from xyzw to wxyz - quats = self._root_physx_view.get_element_rotations().view(self._root_physx_view.count, -1, 4) + quats = self._root_view.get_element_rotations().view(self._root_view.count, -1, 4) quats = math_utils.convert_quat(quats, to="wxyz") # set the buffer data and timestamp self._collision_element_quat_w.data = quats @@ -167,8 +167,8 @@ def sim_element_deform_gradient_w(self): if self._sim_element_deform_gradient_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp self._sim_element_deform_gradient_w.data = ( - self._root_physx_view.get_sim_element_deformation_gradients().view( - self._root_physx_view.count, -1, 3, 3 + self._root_view.get_sim_element_deformation_gradients().view( + self._root_view.count, -1, 3, 3 ) ) self._sim_element_deform_gradient_w.timestamp = self._sim_timestamp @@ -182,7 +182,7 @@ def collision_element_deform_gradient_w(self): if self._collision_element_deform_gradient_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp self._collision_element_deform_gradient_w.data = ( - self._root_physx_view.get_element_deformation_gradients().view(self._root_physx_view.count, -1, 3, 3) + self._root_view.get_element_deformation_gradients().view(self._root_view.count, -1, 3, 3) ) self._collision_element_deform_gradient_w.timestamp = self._sim_timestamp return self._collision_element_deform_gradient_w.data @@ -194,8 +194,8 @@ def sim_element_stress_w(self): """ if self._sim_element_stress_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp - self._sim_element_stress_w.data = self._root_physx_view.get_sim_element_stresses().view( - self._root_physx_view.count, -1, 3, 3 + self._sim_element_stress_w.data = self._root_view.get_sim_element_stresses().view( + self._root_view.count, -1, 3, 3 ) self._sim_element_stress_w.timestamp = self._sim_timestamp return self._sim_element_stress_w.data @@ -207,8 +207,8 @@ def collision_element_stress_w(self): """ if self._collision_element_stress_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp - self._collision_element_stress_w.data = self._root_physx_view.get_element_stresses().view( - self._root_physx_view.count, -1, 3, 3 + self._collision_element_stress_w.data = self._root_view.get_element_stresses().view( + self._root_view.count, -1, 3, 3 ) self._collision_element_stress_w.timestamp = self._sim_timestamp return self._collision_element_stress_w.data diff --git a/source/isaaclab_physx/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py index 12bdd5c0651..f760b7289ce 100644 --- 
a/source/isaaclab_physx/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -238,11 +238,11 @@ def test_initialization_floating_base_non_root(sim, num_articulations, device, a # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -296,11 +296,11 @@ def test_initialization_floating_base(sim, num_articulations, device, add_ground # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -353,11 +353,11 @@ def test_initialization_fixed_base(sim, num_articulations, device): # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -417,11 +417,11 @@ def test_initialization_fixed_base_single_joint(sim, num_articulations, device, # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count 
+ assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # -- actuator type for actuator_name, actuator in articulation.actuators.items(): @@ -480,9 +480,9 @@ def test_initialization_hand_with_tendons(sim, num_articulations, device): # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- actuator type for actuator_name, actuator in articulation.actuators.items(): is_implicit_model_cfg = isinstance(articulation_cfg.actuators[actuator_name], ImplicitActuatorCfg) @@ -534,11 +534,11 @@ def test_initialization_floating_base_made_fixed_base(sim, num_articulations, de # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # Simulate physics @@ -593,11 +593,11 @@ def test_initialization_fixed_base_made_floating_base(sim, num_articulations, de # Check some internal physx data for debugging # -- joint related - assert articulation.root_physx_view.max_dofs == articulation.root_physx_view.shared_metatype.dof_count + assert articulation.root_view.max_dofs == articulation.root_view.shared_metatype.dof_count # -- link related - assert articulation.root_physx_view.max_links == articulation.root_physx_view.shared_metatype.link_count + assert articulation.root_view.max_links == articulation.root_view.shared_metatype.link_count # -- link names (check within articulation ordering is correct) - prim_path_body_names = [path.split("/")[-1] for path in articulation.root_physx_view.link_paths[0]] + prim_path_body_names = [path.split("/")[-1] for path in articulation.root_view.link_paths[0]] assert prim_path_body_names == articulation.body_names # Simulate physics @@ -1335,7 +1335,7 @@ def test_setting_velocity_limit_implicit(sim, num_articulations, device, vel_lim sim.reset() # read the values set into the simulation - physx_vel_limit = articulation.root_physx_view.get_dof_max_velocities().to(device) + physx_vel_limit = articulation.root_view.get_dof_max_velocities().to(device) # check data buffer torch.testing.assert_close(articulation.data.joint_velocity_limits, physx_vel_limit) # check actuator has 
simulation velocity limit @@ -1384,7 +1384,7 @@ def test_setting_velocity_limit_explicit(sim, num_articulations, device, vel_lim sim.reset() # collect limit init values - physx_vel_limit = articulation.root_physx_view.get_dof_max_velocities().to(device) + physx_vel_limit = articulation.root_view.get_dof_max_velocities().to(device) actuator_vel_limit = articulation.actuators["joint"].velocity_limit actuator_vel_limit_sim = articulation.actuators["joint"].velocity_limit_sim @@ -1448,7 +1448,7 @@ def test_setting_effort_limit_implicit(sim, num_articulations, device, effort_li sim.reset() # obtain the physx effort limits - physx_effort_limit = articulation.root_physx_view.get_dof_max_forces().to(device=device) + physx_effort_limit = articulation.root_view.get_dof_max_forces().to(device=device) # check that the two are equivalent torch.testing.assert_close( @@ -1502,7 +1502,7 @@ def test_setting_effort_limit_explicit(sim, num_articulations, device, effort_li usd_default_effort_limit = 80.0 # collect limit init values - physx_effort_limit = articulation.root_physx_view.get_dof_max_forces().to(device) + physx_effort_limit = articulation.root_view.get_dof_max_forces().to(device) actuator_effort_limit = articulation.actuators["joint"].effort_limit actuator_effort_limit_sim = articulation.actuators["joint"].effort_limit_sim @@ -1660,14 +1660,14 @@ def test_body_root_state(sim, num_articulations, device, with_offset): # create com offsets num_bodies = articulation.num_bodies - com = articulation.root_physx_view.get_coms() + com = articulation.root_view.get_coms() link_offset = [1.0, 0.0, 0.0] # the offset from CenterPivot to Arm frames new_com = torch.tensor(offset, device=device).repeat(num_articulations, 1, 1) com[:, 1, :3] = new_com.squeeze(-2) - articulation.root_physx_view.set_coms(com.cpu(), env_idx.cpu()) + articulation.root_view.set_coms(com.cpu(), env_idx.cpu()) # check they are set - torch.testing.assert_close(articulation.root_physx_view.get_coms(), com.cpu()) + torch.testing.assert_close(articulation.root_view.get_coms(), com.cpu()) for i in range(50): # perform step @@ -1774,13 +1774,13 @@ def test_write_root_state(sim, num_articulations, device, with_offset, state_loc offset = torch.tensor([0.0, 0.0, 0.0]).repeat(num_articulations, 1, 1) # create com offsets - com = articulation.root_physx_view.get_coms() + com = articulation.root_view.get_coms() new_com = offset com[:, 0, :3] = new_com.squeeze(-2) - articulation.root_physx_view.set_coms(com, env_idx) + articulation.root_view.set_coms(com, env_idx) # check they are set - torch.testing.assert_close(articulation.root_physx_view.get_coms(), com) + torch.testing.assert_close(articulation.root_view.get_coms(), com) rand_state = torch.zeros_like(articulation.data.root_state_w) rand_state[..., :7] = articulation.data.default_root_state[..., :7] @@ -2110,14 +2110,14 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground articulation.update(sim.cfg.dt) if get_isaac_sim_version().major >= 5: - friction_props_from_sim = articulation.root_physx_view.get_dof_friction_properties() + friction_props_from_sim = articulation.root_view.get_dof_friction_properties() joint_friction_coeff_sim = friction_props_from_sim[:, :, 0] joint_dynamic_friction_coeff_sim = friction_props_from_sim[:, :, 1] joint_viscous_friction_coeff_sim = friction_props_from_sim[:, :, 2] assert torch.allclose(joint_dynamic_friction_coeff_sim, dynamic_friction.cpu()) assert torch.allclose(joint_viscous_friction_coeff_sim, viscous_friction.cpu()) else: - 
joint_friction_coeff_sim = articulation.root_physx_view.get_dof_friction_properties() + joint_friction_coeff_sim = articulation.root_view.get_dof_friction_properties() assert torch.allclose(joint_friction_coeff_sim, friction.cpu()) @@ -2153,7 +2153,7 @@ def test_write_joint_frictions_to_sim(sim, num_articulations, device, add_ground sim.step() articulation.update(sim.cfg.dt) - friction_props_from_sim_2 = articulation.root_physx_view.get_dof_friction_properties() + friction_props_from_sim_2 = articulation.root_view.get_dof_friction_properties() joint_friction_coeff_sim_2 = friction_props_from_sim_2[:, :, 0] friction_dynamic_coef_sim_2 = friction_props_from_sim_2[:, :, 1] friction_viscous_coeff_sim_2 = friction_props_from_sim_2[:, :, 2] diff --git a/source/isaaclab_physx/test/assets/test_deformable_object.py b/source/isaaclab_physx/test/assets/test_deformable_object.py index a593d31707d..a17cf58d204 100644 --- a/source/isaaclab_physx/test/assets/test_deformable_object.py +++ b/source/isaaclab_physx/test/assets/test_deformable_object.py @@ -114,7 +114,7 @@ def test_initialization(sim, num_cubes, material_path): # Check correct number of cubes assert cube_object.num_instances == num_cubes - assert cube_object.root_physx_view.count == num_cubes + assert cube_object.root_view.count == num_cubes # Check correct number of materials in the view if material_path: @@ -314,7 +314,7 @@ def test_set_kinematic_targets(sim, num_cubes): sim.reset() - nodal_kinematic_targets = cube_object.root_physx_view.get_sim_kinematic_targets().clone() + nodal_kinematic_targets = cube_object.root_view.get_sim_kinematic_targets().clone() for _ in range(5): cube_object.write_nodal_state_to_sim(cube_object.data.default_nodal_state_w) diff --git a/source/isaaclab_physx/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py index cfe81175033..257c2ce4919 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -296,7 +296,7 @@ def test_external_force_on_single_body(num_cubes, device): # Sample a force equal to the weight of the object external_wrench_b = torch.zeros(cube_object.num_instances, len(body_ids), 6, device=sim.device) # Every 2nd cube should have a force applied to it - external_wrench_b[0::2, :, 2] = 9.81 * cube_object.root_physx_view.get_masses()[0] + external_wrench_b[0::2, :, 2] = 9.81 * cube_object.root_view.get_masses()[0] # Now we are ready! 
for i in range(5): @@ -582,7 +582,7 @@ def test_rigid_body_set_material_properties(num_cubes, device): indices = torch.tensor(range(num_cubes), dtype=torch.int) # Add friction to cube - cube_object.root_physx_view.set_material_properties(materials, indices) + cube_object.root_view.set_material_properties(materials, indices) # Simulate physics # perform rendering @@ -591,7 +591,7 @@ def test_rigid_body_set_material_properties(num_cubes, device): cube_object.update(sim.cfg.dt) # Get material properties - materials_to_check = cube_object.root_physx_view.get_material_properties() + materials_to_check = cube_object.root_view.get_material_properties() # Check if material properties are set correctly torch.testing.assert_close(materials_to_check.reshape(num_cubes, 3), materials) @@ -628,7 +628,7 @@ def test_rigid_body_no_friction(num_cubes, device): cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) indices = torch.tensor(range(num_cubes), dtype=torch.int) - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties(cube_object_materials, indices) # Set initial velocity # Initial velocity in X to get the block moving @@ -694,14 +694,14 @@ def test_rigid_body_with_static_friction(num_cubes, device): indices = torch.tensor(range(num_cubes), dtype=torch.int) # Add friction to cube - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties(cube_object_materials, indices) # let everything settle for _ in range(100): sim.step() cube_object.update(sim.cfg.dt) cube_object.write_root_velocity_to_sim(torch.zeros((num_cubes, 6), device=sim.device)) - cube_mass = cube_object.root_physx_view.get_masses() + cube_mass = cube_object.root_view.get_masses() gravity_magnitude = abs(sim.cfg.gravity[2]) # 2 cases: force applied is below and above mu # below mu: block should not move as the force applied is <= mu @@ -790,7 +790,7 @@ def test_rigid_body_with_restitution(num_cubes, device): cube_object_materials = torch.cat([static_friction, dynamic_friction, restitution], dim=-1) # Add restitution to cube - cube_object.root_physx_view.set_material_properties(cube_object_materials, indices) + cube_object.root_view.set_material_properties(cube_object_materials, indices) curr_z_velocity = cube_object.data.root_lin_vel_w[:, 2].clone() @@ -834,7 +834,7 @@ def test_rigid_body_set_mass(num_cubes, device): sim.reset() # Get masses before increasing - original_masses = cube_object.root_physx_view.get_masses() + original_masses = cube_object.root_view.get_masses() assert original_masses.shape == (num_cubes, 1) @@ -844,9 +844,9 @@ def test_rigid_body_set_mass(num_cubes, device): indices = torch.tensor(range(num_cubes), dtype=torch.int) # Add friction to cube - cube_object.root_physx_view.set_masses(masses, indices) + cube_object.root_view.set_masses(masses, indices) - torch.testing.assert_close(cube_object.root_physx_view.get_masses(), masses) + torch.testing.assert_close(cube_object.root_view.get_masses(), masses) # Simulate physics # perform rendering @@ -854,7 +854,7 @@ def test_rigid_body_set_mass(num_cubes, device): # update object cube_object.update(sim.cfg.dt) - masses_to_check = cube_object.root_physx_view.get_masses() + masses_to_check = cube_object.root_view.get_masses() # Check if mass is set correctly torch.testing.assert_close(masses, masses_to_check) @@ -925,12 +925,12 @@ def test_body_root_state_properties(num_cubes, device, 
with_offset): else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = cube_object.root_view.get_coms() com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(com, env_idx) # check ceter of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(cube_object.root_view.get_coms(), com) # random z spin velocity spin_twist = torch.zeros(6, device=device) @@ -1030,12 +1030,12 @@ def test_write_root_state(num_cubes, device, with_offset, state_location): else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = cube_object.root_view.get_coms() com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(com, env_idx) # check center of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(cube_object.root_view.get_coms(), com) rand_state = torch.zeros_like(cube_object.data.root_state_w) rand_state[..., :7] = cube_object.data.default_root_state[..., :7] @@ -1092,12 +1092,12 @@ def test_write_state_functions_data_consistency(num_cubes, device, with_offset, else: offset = torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_cubes, 1) - com = cube_object.root_physx_view.get_coms() + com = cube_object.root_view.get_coms() com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(com, env_idx) + cube_object.root_view.set_coms(com, env_idx) # check ceter of mass has been set - torch.testing.assert_close(cube_object.root_physx_view.get_coms(), com) + torch.testing.assert_close(cube_object.root_view.get_coms(), com) rand_state = torch.rand_like(cube_object.data.root_state_w) # rand_state[..., :7] = cube_object.data.default_root_state[..., :7] diff --git a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py index 5fbc82cc0dd..9c85847b3a1 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py @@ -490,12 +490,12 @@ def test_object_state_properties(sim, num_envs, num_cubes, device, with_offset, else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = cube_object.reshape_view_to_data(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) + cube_object.root_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_view.get_coms()), com) # random z spin velocity spin_twist = torch.zeros(6, device=device) @@ -584,12 +584,12 @@ def test_write_object_state(sim, num_envs, num_cubes, device, with_offset, state else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = cube_object.reshape_view_to_data(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - 
cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) + cube_object.root_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_view.get_coms()), com) rand_state = torch.zeros_like(cube_object.data.object_link_state_w) rand_state[..., :7] = cube_object.data.default_object_state[..., :7] @@ -668,7 +668,7 @@ def test_set_material_properties(sim, num_envs, num_cubes, device): # Add friction to cube indices = torch.tensor(range(num_cubes * num_envs), dtype=torch.int) - object_collection.root_physx_view.set_material_properties( + object_collection.root_view.set_material_properties( object_collection.reshape_data_to_view(materials), indices ) @@ -677,7 +677,7 @@ def test_set_material_properties(sim, num_envs, num_cubes, device): object_collection.update(sim.cfg.dt) # Get material properties - materials_to_check = object_collection.root_physx_view.get_material_properties() + materials_to_check = object_collection.root_view.get_material_properties() # Check if material properties are set correctly torch.testing.assert_close(object_collection.reshape_view_to_data(materials_to_check), materials) @@ -743,12 +743,12 @@ def test_write_object_state_functions_data_consistency( else torch.tensor([0.0, 0.0, 0.0], device=device).repeat(num_envs, num_cubes, 1) ) - com = cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()) + com = cube_object.reshape_view_to_data(cube_object.root_view.get_coms()) com[..., :3] = offset.to("cpu") - cube_object.root_physx_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) + cube_object.root_view.set_coms(cube_object.reshape_data_to_view(com.clone()), view_ids) # check center of mass has been set - torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_physx_view.get_coms()), com) + torch.testing.assert_close(cube_object.reshape_view_to_data(cube_object.root_view.get_coms()), com) rand_state = torch.rand_like(cube_object.data.object_link_state_w) rand_state[..., :3] += cube_object.data.object_link_pos_w From 6711a05c19302af49d573e9df7577c3500abfc06 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 09:54:54 +0100 Subject: [PATCH 07/38] Now uses root_view instead of root_physx_view --- scripts/demos/bin_packing.py | 8 ++--- scripts/demos/haply_teleoperation.py | 2 +- scripts/demos/quadcopter.py | 2 +- .../tutorials/05_controllers/run_diff_ik.py | 2 +- scripts/tutorials/05_controllers/run_osc.py | 6 ++-- .../mdp/actions/pink_task_space_actions.py | 4 +-- .../mdp/actions/rmpflow_task_space_actions.py | 2 +- .../envs/mdp/actions/task_space_actions.py | 8 ++--- source/isaaclab/isaaclab/envs/mdp/events.py | 30 +++++++++---------- .../test/controllers/test_differential_ik.py | 2 +- .../controllers/test_operational_space.py | 6 ++-- .../assets/multirotor/multirotor.py | 4 +-- .../direct/automate/assembly_env.py | 12 ++++---- .../direct/automate/disassembly_env.py | 12 ++++---- .../direct/factory/factory_env.py | 4 +-- .../direct/factory/factory_utils.py | 8 ++--- .../isaaclab_tasks/direct/forge/forge_env.py | 2 +- .../inhand_manipulation_env.py | 4 +-- .../direct/quadcopter/quadcopter_env.py | 2 +- .../shadow_hand/shadow_hand_vision_env.py | 2 +- .../shadow_hand_over/shadow_hand_over_env.py | 2 +- .../manipulation/deploy/mdp/events.py 
| 2 +- 22 files changed, 63 insertions(+), 63 deletions(-) diff --git a/scripts/demos/bin_packing.py b/scripts/demos/bin_packing.py index a43cbf199b2..9f38fe6c1ef 100644 --- a/scripts/demos/bin_packing.py +++ b/scripts/demos/bin_packing.py @@ -188,8 +188,8 @@ def reset_object_collections( view_states[view_ids, :7] = torch.concat((positions, orientations), dim=-1) view_states[view_ids, 7:] = new_velocities - rigid_object_collection.root_physx_view.set_transforms(view_states[:, :7], indices=view_ids) - rigid_object_collection.root_physx_view.set_velocities(view_states[:, 7:], indices=view_ids) + rigid_object_collection.root_view.set_transforms(view_states[:, :7], indices=view_ids) + rigid_object_collection.root_view.set_velocities(view_states[:, 7:], indices=view_ids) def build_grocery_defaults( @@ -301,8 +301,8 @@ def run_simulator(sim: SimulationContext, scene: InteractiveScene) -> None: reset_object_collections(scene, "groceries", spawn_w, view_indices[groceries_mask.view(-1)], noise=True) # Vary the mass and gravity settings so cached objects stay parked. random_masses = torch.rand(groceries.num_instances * num_objects, device=device) * 0.2 + 0.2 - groceries.root_physx_view.set_masses(random_masses.cpu(), view_indices.cpu()) - groceries.root_physx_view.set_disable_gravities((~groceries_mask).cpu(), indices=view_indices.cpu()) + groceries.root_view.set_masses(random_masses.cpu(), view_indices.cpu()) + groceries.root_view.set_disable_gravities((~groceries_mask).cpu(), indices=view_indices.cpu()) scene.reset() # Write data to sim diff --git a/scripts/demos/haply_teleoperation.py b/scripts/demos/haply_teleoperation.py index b6d02900baf..7df17ac8649 100644 --- a/scripts/demos/haply_teleoperation.py +++ b/scripts/demos/haply_teleoperation.py @@ -303,7 +303,7 @@ def run_simulator( ee_quat_w = robot.data.body_quat_w[:, ee_body_idx] # get jacobian to IK controller - jacobian = robot.root_physx_view.get_jacobians()[:, ee_body_idx, :, arm_joint_indices] + jacobian = robot.root_view.get_jacobians()[:, ee_body_idx, :, arm_joint_indices] ik_controller.set_command(command=target_pos_tensor, ee_quat=ee_quat_w) joint_pos_des = ik_controller.compute(ee_pos_w, ee_quat_w, jacobian, current_joint_pos) diff --git a/scripts/demos/quadcopter.py b/scripts/demos/quadcopter.py index bf42a04f850..ffd030bfb11 100644 --- a/scripts/demos/quadcopter.py +++ b/scripts/demos/quadcopter.py @@ -72,7 +72,7 @@ def main(): # Fetch relevant parameters to make the quadcopter hover in place prop_body_ids = robot.find_bodies("m.*_prop")[0] - robot_mass = robot.root_physx_view.get_masses().sum() + robot_mass = robot.root_view.get_masses().sum() gravity = torch.tensor(sim.cfg.gravity, device=sim.device).norm() # Now we are ready! 
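Note on the change pattern in this patch: the demo-script hunks above and the tutorial/controller hunks below all apply the same mechanical rename, replacing every ``root_physx_view`` access with the new ``root_view`` accessor while leaving the view's getter and setter methods untouched. A minimal before/after sketch of a call site (assuming an already-initialized articulation ``robot`` in a running simulation; the variable names here are illustrative, not taken from the patch):

    # before: accessing the simulation view through the deprecated property
    masses = robot.root_physx_view.get_masses()
    jacobians = robot.root_physx_view.get_jacobians()

    # after: same methods, accessed through the new root_view property
    masses = robot.root_view.get_masses()
    jacobians = robot.root_view.get_jacobians()
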
diff --git a/scripts/tutorials/05_controllers/run_diff_ik.py b/scripts/tutorials/05_controllers/run_diff_ik.py index 22d17963235..2175ce30c6c 100644 --- a/scripts/tutorials/05_controllers/run_diff_ik.py +++ b/scripts/tutorials/05_controllers/run_diff_ik.py @@ -159,7 +159,7 @@ def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): current_goal_idx = (current_goal_idx + 1) % len(ee_goals) else: # obtain quantities from simulation - jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids] + jacobian = robot.root_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids] ee_pose_w = robot.data.body_pose_w[:, robot_entity_cfg.body_ids[0]] root_pose_w = robot.data.root_pose_w joint_pos = robot.data.joint_pos[:, robot_entity_cfg.joint_ids] diff --git a/scripts/tutorials/05_controllers/run_osc.py b/scripts/tutorials/05_controllers/run_osc.py index 98b2532a0a2..203f7151283 100644 --- a/scripts/tutorials/05_controllers/run_osc.py +++ b/scripts/tutorials/05_controllers/run_osc.py @@ -313,9 +313,9 @@ def update_states( """ # obtain dynamics related quantities from simulation ee_jacobi_idx = ee_frame_idx - 1 - jacobian_w = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] - mass_matrix = robot.root_physx_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] - gravity = robot.root_physx_view.get_gravity_compensation_forces()[:, arm_joint_ids] + jacobian_w = robot.root_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] + mass_matrix = robot.root_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] + gravity = robot.root_view.get_gravity_compensation_forces()[:, arm_joint_ids] # Convert the Jacobian from world to root frame jacobian_b = jacobian_w.clone() root_rot_matrix = matrix_from_quat(quat_inv(robot.data.root_quat_w)) diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py index 8ae90af3a4a..e7e527ab790 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py @@ -326,11 +326,11 @@ def _apply_gravity_compensation(self) -> None: # Get gravity compensation forces using cached tensor if self._asset.is_fixed_base: gravity = torch.zeros_like( - self._asset.root_physx_view.get_gravity_compensation_forces()[:, self._controlled_joint_ids_tensor] + self._asset.root_view.get_gravity_compensation_forces()[:, self._controlled_joint_ids_tensor] ) else: # If floating base, then need to skip the first 6 joints (base) - gravity = self._asset.root_physx_view.get_gravity_compensation_forces()[ + gravity = self._asset.root_view.get_gravity_compensation_forces()[ :, self._controlled_joint_ids_tensor + self._physx_floating_joint_indices_offset ] diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/rmpflow_task_space_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/rmpflow_task_space_actions.py index 83e7bd3e365..128c4b70cb6 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/rmpflow_task_space_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/rmpflow_task_space_actions.py @@ -128,7 +128,7 @@ def processed_actions(self) -> torch.Tensor: @property def jacobian_w(self) -> torch.Tensor: - return self._asset.root_physx_view.get_jacobians()[:, self._jacobi_body_idx, :, self._jacobi_joint_ids] + return self._asset.root_view.get_jacobians()[:, 
self._jacobi_body_idx, :, self._jacobi_joint_ids] @property def jacobian_b(self) -> torch.Tensor: diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/task_space_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/task_space_actions.py index 47f9cec2349..beaf5e1e389 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/task_space_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/task_space_actions.py @@ -142,7 +142,7 @@ def processed_actions(self) -> torch.Tensor: @property def jacobian_w(self) -> torch.Tensor: - return self._asset.root_physx_view.get_jacobians()[:, self._jacobi_body_idx, :, self._jacobi_joint_ids] + return self._asset.root_view.get_jacobians()[:, self._jacobi_body_idx, :, self._jacobi_joint_ids] @property def jacobian_b(self) -> torch.Tensor: @@ -434,7 +434,7 @@ def processed_actions(self) -> torch.Tensor: @property def jacobian_w(self) -> torch.Tensor: - return self._asset.root_physx_view.get_jacobians()[:, self._jacobi_ee_body_idx, :, self._jacobi_joint_idx] + return self._asset.root_view.get_jacobians()[:, self._jacobi_ee_body_idx, :, self._jacobi_joint_idx] @property def jacobian_b(self) -> torch.Tensor: @@ -645,10 +645,10 @@ def _resolve_nullspace_joint_pos_targets(self): def _compute_dynamic_quantities(self): """Computes the dynamic quantities for operational space control.""" - self._mass_matrix[:] = self._asset.root_physx_view.get_generalized_mass_matrices()[:, self._joint_ids, :][ + self._mass_matrix[:] = self._asset.root_view.get_generalized_mass_matrices()[:, self._joint_ids, :][ :, :, self._joint_ids ] - self._gravity[:] = self._asset.root_physx_view.get_gravity_compensation_forces()[:, self._joint_ids] + self._gravity[:] = self._asset.root_view.get_gravity_compensation_forces()[:, self._joint_ids] def _compute_ee_jacobian(self): """Computes the geometric Jacobian of the ee body frame in root frame. diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 35e1f425f70..6c4b0df5d0a 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -208,12 +208,12 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): # per body. We use the physics simulation view to obtain the number of shapes per body. if isinstance(self.asset, Articulation) and self.asset_cfg.body_ids != slice(None): self.num_shapes_per_body = [] - for link_path in self.asset.root_physx_view.link_paths[0]: + for link_path in self.asset.root_view.link_paths[0]: link_physx_view = self.asset._physics_sim_view.create_rigid_body_view(link_path) # type: ignore self.num_shapes_per_body.append(link_physx_view.max_shapes) # ensure the parsing is correct num_shapes = sum(self.num_shapes_per_body) - expected_shapes = self.asset.root_physx_view.max_shapes + expected_shapes = self.asset.root_view.max_shapes if num_shapes != expected_shapes: raise ValueError( "Randomization term 'randomize_rigid_body_material' failed to parse the number of shapes per body." 
@@ -259,12 +259,12 @@ def __call__( env_ids = env_ids.cpu() # randomly assign material IDs to the geometries - total_num_shapes = self.asset.root_physx_view.max_shapes + total_num_shapes = self.asset.root_view.max_shapes bucket_ids = torch.randint(0, num_buckets, (len(env_ids), total_num_shapes), device="cpu") material_samples = self.material_buckets[bucket_ids] # retrieve material buffer from the physics simulation - materials = self.asset.root_physx_view.get_material_properties() + materials = self.asset.root_view.get_material_properties() # update material buffer with new samples if self.num_shapes_per_body is not None: @@ -281,7 +281,7 @@ def __call__( materials[env_ids] = material_samples[:] # apply to simulation - self.asset.root_physx_view.set_material_properties(materials, env_ids) + self.asset.root_view.set_material_properties(materials, env_ids) class randomize_rigid_body_mass(ManagerTermBase): @@ -361,7 +361,7 @@ def __call__( body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int, device="cpu") # get the current masses of the bodies (num_assets, num_bodies) - masses = self.asset.root_physx_view.get_masses() + masses = self.asset.root_view.get_masses() # apply randomization on default values # this is to make sure when calling the function multiple times, the randomization is applied on the @@ -377,7 +377,7 @@ def __call__( masses = torch.clamp(masses, min=min_mass) # ensure masses are positive # set the mass into the physics simulation - self.asset.root_physx_view.set_masses(masses, env_ids) + self.asset.root_view.set_masses(masses, env_ids) # recompute inertia tensors if needed if recompute_inertia: @@ -385,7 +385,7 @@ def __call__( ratios = masses[env_ids[:, None], body_ids] / self.asset.data.default_mass[env_ids[:, None], body_ids] # scale the inertia tensors by the the ratios # since mass randomization is done on default values, we can use the default inertia tensors - inertias = self.asset.root_physx_view.get_inertias() + inertias = self.asset.root_view.get_inertias() if isinstance(self.asset, Articulation): # inertia has shape: (num_envs, num_bodies, 9) for articulation inertias[env_ids[:, None], body_ids] = ( @@ -395,7 +395,7 @@ def __call__( # inertia has shape: (num_envs, 9) for rigid object inertias[env_ids] = self.asset.data.default_inertia[env_ids] * ratios # set the inertia tensors into the physics simulation - self.asset.root_physx_view.set_inertias(inertias, env_ids) + self.asset.root_view.set_inertias(inertias, env_ids) def randomize_rigid_body_com( @@ -430,13 +430,13 @@ def randomize_rigid_body_com( rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device="cpu").unsqueeze(1) # get the current com of the bodies (num_assets, num_bodies) - coms = asset.root_physx_view.get_coms().clone() + coms = asset.root_view.get_coms().clone() # Randomize the com in range coms[env_ids[:, None], body_ids, :3] += rand_samples # Set the new coms - asset.root_physx_view.set_coms(coms, env_ids) + asset.root_view.set_coms(coms, env_ids) def randomize_rigid_body_collider_offsets( @@ -472,7 +472,7 @@ def randomize_rigid_body_collider_offsets( # sample collider properties from the given ranges and set into the physics simulation # -- rest offsets if rest_offset_distribution_params is not None: - rest_offset = asset.root_physx_view.get_rest_offsets().clone() + rest_offset = asset.root_view.get_rest_offsets().clone() rest_offset = _randomize_prop_by_op( rest_offset, rest_offset_distribution_params, @@ -481,10 +481,10 @@ def 
randomize_rigid_body_collider_offsets( operation="abs", distribution=distribution, ) - asset.root_physx_view.set_rest_offsets(rest_offset, env_ids.cpu()) + asset.root_view.set_rest_offsets(rest_offset, env_ids.cpu()) # -- contact offsets if contact_offset_distribution_params is not None: - contact_offset = asset.root_physx_view.get_contact_offsets().clone() + contact_offset = asset.root_view.get_contact_offsets().clone() contact_offset = _randomize_prop_by_op( contact_offset, contact_offset_distribution_params, @@ -493,7 +493,7 @@ def randomize_rigid_body_collider_offsets( operation="abs", distribution=distribution, ) - asset.root_physx_view.set_contact_offsets(contact_offset, env_ids.cpu()) + asset.root_view.set_contact_offsets(contact_offset, env_ids.cpu()) def randomize_physics_scene_gravity( diff --git a/source/isaaclab/test/controllers/test_differential_ik.py b/source/isaaclab/test/controllers/test_differential_ik.py index 65ce828129f..43b64174536 100644 --- a/source/isaaclab/test/controllers/test_differential_ik.py +++ b/source/isaaclab/test/controllers/test_differential_ik.py @@ -204,7 +204,7 @@ def _run_ik_controller( # at reset, the jacobians are not updated to the latest state # so we MUST skip the first step # obtain quantities from simulation - jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] + jacobian = robot.root_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] ee_pose_w = robot.data.body_pose_w[:, ee_frame_idx] root_pose_w = robot.data.root_pose_w base_rot = root_pose_w[:, 3:7] diff --git a/source/isaaclab/test/controllers/test_operational_space.py b/source/isaaclab/test/controllers/test_operational_space.py index eeec14d877d..880680a1976 100644 --- a/source/isaaclab/test/controllers/test_operational_space.py +++ b/source/isaaclab/test/controllers/test_operational_space.py @@ -1434,9 +1434,9 @@ def _update_states( """ # obtain dynamics related quantities from simulation ee_jacobi_idx = ee_frame_idx - 1 - jacobian_w = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] - mass_matrix = robot.root_physx_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] - gravity = robot.root_physx_view.get_gravity_compensation_forces()[:, arm_joint_ids] + jacobian_w = robot.root_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] + mass_matrix = robot.root_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] + gravity = robot.root_view.get_gravity_compensation_forces()[:, arm_joint_ids] # Convert the Jacobian from world to root frame jacobian_b = jacobian_w.clone() root_rot_matrix = matrix_from_quat(quat_inv(robot.data.root_quat_w)) diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py index 6f8800c3221..0949c77818a 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py @@ -286,7 +286,7 @@ def _initialize_impl(self): super()._initialize_impl() # Replace data container with MultirotorData - self._data = MultirotorData(self.root_physx_view, self.device) + self._data = MultirotorData(self.root_view, self.device) # Create thruster buffers with correct size (SINGLE PHASE) self._create_thruster_buffers() @@ -508,7 +508,7 @@ def _apply_combined_wrench(self): # Combine individual thrusts into a wrench vector self._combine_thrusts() - 
self.root_physx_view.apply_forces_and_torques_at_position( + self.root_view.apply_forces_and_torques_at_position( force_data=self._internal_force_target_sim.view(-1, 3), # Shape: (num_envs * num_bodies, 3) torque_data=self._internal_torque_target_sim.view(-1, 3), # Shape: (num_envs * num_bodies, 3) position_data=None, # Apply at center of mass diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py index 35e99912038..eee870d1dd7 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/assembly_env.py @@ -82,11 +82,11 @@ def _init_eval_logging(self): def _set_body_inertias(self): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = self._robot.root_physx_view.get_inertias() + inertias = self._robot.root_view.get_inertias() offset = torch.zeros_like(inertias) offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - self._robot.root_physx_view.set_inertias(new_inertias, torch.arange(self.num_envs)) + self._robot.root_view.set_inertias(new_inertias, torch.arange(self.num_envs)) def _set_default_dynamics_parameters(self): """Set parameters defining dynamic interactions.""" @@ -108,11 +108,11 @@ def _set_default_dynamics_parameters(self): def _set_friction(self, asset, value): """Update material properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = asset.root_view.get_material_properties() materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. env_ids = torch.arange(self.scene.num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + asset.root_view.set_material_properties(materials, env_ids) def _init_tensors(self): """Initialize tensors once.""" @@ -284,12 +284,12 @@ def _compute_intermediate_values(self, dt): self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = self._robot.root_view.get_jacobians() self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] + self.arm_mass_matrix = self._robot.root_view.get_generalized_mass_matrices()[:, 0:7, 0:7] self.joint_pos = self._robot.data.joint_pos.clone() self.joint_vel = self._robot.data.joint_vel.clone() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py index a4b454829ea..7c27ca0ba8e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/automate/disassembly_env.py @@ -50,11 +50,11 @@ def __init__(self, cfg: DisassemblyEnvCfg, render_mode: str | None = None, **kwa def _set_body_inertias(self): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = self._robot.root_physx_view.get_inertias() + inertias = self._robot.root_view.get_inertias() offset = torch.zeros_like(inertias) 
offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - self._robot.root_physx_view.set_inertias(new_inertias, torch.arange(self.num_envs)) + self._robot.root_view.set_inertias(new_inertias, torch.arange(self.num_envs)) def _set_default_dynamics_parameters(self): """Set parameters defining dynamic interactions.""" @@ -76,11 +76,11 @@ def _set_default_dynamics_parameters(self): def _set_friction(self, asset, value): """Update material properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = asset.root_view.get_material_properties() materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. env_ids = torch.arange(self.scene.num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + asset.root_view.set_material_properties(materials, env_ids) def _init_tensors(self): """Initialize tensors once.""" @@ -205,12 +205,12 @@ def _compute_intermediate_values(self, dt): self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = self._robot.root_view.get_jacobians() self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] + self.arm_mass_matrix = self._robot.root_view.get_generalized_mass_matrices()[:, 0:7, 0:7] self.joint_pos = self._robot.data.joint_pos.clone() self.joint_vel = self._robot.data.joint_vel.clone() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py index a4e9c6d9ece..476c5de4d8e 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_env.py @@ -129,12 +129,12 @@ def _compute_intermediate_values(self, dt): self.fingertip_midpoint_linvel = self._robot.data.body_lin_vel_w[:, self.fingertip_body_idx] self.fingertip_midpoint_angvel = self._robot.data.body_ang_vel_w[:, self.fingertip_body_idx] - jacobians = self._robot.root_physx_view.get_jacobians() + jacobians = self._robot.root_view.get_jacobians() self.left_finger_jacobian = jacobians[:, self.left_finger_body_idx - 1, 0:6, 0:7] self.right_finger_jacobian = jacobians[:, self.right_finger_body_idx - 1, 0:6, 0:7] self.fingertip_midpoint_jacobian = (self.left_finger_jacobian + self.right_finger_jacobian) * 0.5 - self.arm_mass_matrix = self._robot.root_physx_view.get_generalized_mass_matrices()[:, 0:7, 0:7] + self.arm_mass_matrix = self._robot.root_view.get_generalized_mass_matrices()[:, 0:7, 0:7] self.joint_pos = self._robot.data.joint_pos.clone() self.joint_vel = self._robot.data.joint_vel.clone() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py index 962b3872bf0..6097b304e82 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/factory/factory_utils.py @@ -30,20 +30,20 @@ def wrap_yaw(angle): def set_friction(asset, value, num_envs): """Update material 
properties for a given asset.""" - materials = asset.root_physx_view.get_material_properties() + materials = asset.root_view.get_material_properties() materials[..., 0] = value # Static friction. materials[..., 1] = value # Dynamic friction. env_ids = torch.arange(num_envs, device="cpu") - asset.root_physx_view.set_material_properties(materials, env_ids) + asset.root_view.set_material_properties(materials, env_ids) def set_body_inertias(robot, num_envs): """Note: this is to account for the asset_options.armature parameter in IGE.""" - inertias = robot.root_physx_view.get_inertias() + inertias = robot.root_view.get_inertias() offset = torch.zeros_like(inertias) offset[:, :, [0, 4, 8]] += 0.01 new_inertias = inertias + offset - robot.root_physx_view.set_inertias(new_inertias, torch.arange(num_envs)) + robot.root_view.set_inertias(new_inertias, torch.arange(num_envs)) def get_held_base_pos_local(task_name, fixed_asset_cfg, num_envs, device): diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py index 75484cbd8f1..7617e30988c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py @@ -92,7 +92,7 @@ def _compute_intermediate_values(self, dt): self.prev_fingertip_quat = self.noisy_fingertip_quat.clone() # Update and smooth force values. - self.force_sensor_world = self._robot.root_physx_view.get_link_incoming_joint_force()[ + self.force_sensor_world = self._robot.root_view.get_link_incoming_joint_force()[ :, self.force_sensor_body_idx ] diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py index c8d4fbf9e2d..041f6bb7775 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py @@ -51,7 +51,7 @@ def __init__(self, cfg: AllegroHandEnvCfg | ShadowHandEnvCfg, render_mode: str | self.num_fingertips = len(self.finger_bodies) # joint limits - joint_pos_limits = self.hand.root_physx_view.get_dof_limits().to(self.device) + joint_pos_limits = self.hand.root_view.get_dof_limits().to(self.device) self.hand_dof_lower_limits = joint_pos_limits[..., 0] self.hand_dof_upper_limits = joint_pos_limits[..., 1] @@ -119,7 +119,7 @@ def _apply_action(self) -> None: def _get_observations(self) -> dict: if self.cfg.asymmetric_obs: - self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[ + self.fingertip_force_sensors = self.hand.root_view.get_link_incoming_joint_force()[ :, self.finger_bodies ] diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py index 02857c63d34..284ef2845bf 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/quadcopter/quadcopter_env.py @@ -124,7 +124,7 @@ def __init__(self, cfg: QuadcopterEnvCfg, render_mode: str | None = None, **kwar } # Get specific body indices self._body_id = self._robot.find_bodies("body")[0] - self._robot_mass = self._robot.root_physx_view.get_masses()[0].sum() + self._robot_mass = self._robot.root_view.get_masses()[0].sum() self._gravity_magnitude = torch.tensor(self.sim.cfg.gravity, 
device=self.device).norm() self._robot_weight = (self._robot_mass * self._gravity_magnitude).item() diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py index b5c781c1a9f..b8e3d4be5aa 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand/shadow_hand_vision_env.py @@ -149,7 +149,7 @@ def _get_observations(self) -> dict: image_obs = self._compute_image_observations() obs = torch.cat((state_obs, image_obs), dim=-1) # asymmetric critic states - self.fingertip_force_sensors = self.hand.root_physx_view.get_link_incoming_joint_force()[:, self.finger_bodies] + self.fingertip_force_sensors = self.hand.root_view.get_link_incoming_joint_force()[:, self.finger_bodies] state = self._compute_states() observations = {"policy": obs, "critic": state} diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py index 09bbff6e97c..2c470c7724b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/shadow_hand_over/shadow_hand_over_env.py @@ -63,7 +63,7 @@ def __init__(self, cfg: ShadowHandOverEnvCfg, render_mode: str | None = None, ** self.num_fingertips = len(self.finger_bodies) # joint limits - joint_pos_limits = self.right_hand.root_physx_view.get_dof_limits().to(self.device) + joint_pos_limits = self.right_hand.root_view.get_dof_limits().to(self.device) self.hand_dof_lower_limits = joint_pos_limits[..., 0] self.hand_dof_upper_limits = joint_pos_limits[..., 1] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py index 7666875435f..e3c0bc35c1b 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/deploy/mdp/events.py @@ -327,7 +327,7 @@ def __call__( break # Solve IK using jacobian - jacobians = self.robot_asset.root_physx_view.get_jacobians().clone() + jacobians = self.robot_asset.root_view.get_jacobians().clone() jacobian = jacobians[env_ids, self.jacobi_body_idx, :, :] delta_dof_pos = fc._get_delta_dof_pos( From 080c5b7079884445355d949582b52051fc8a8768 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 09:57:34 +0100 Subject: [PATCH 08/38] More doc --- source/isaaclab_tasks/docs/CHANGELOG.rst | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/source/isaaclab_tasks/docs/CHANGELOG.rst b/source/isaaclab_tasks/docs/CHANGELOG.rst index f9100882c5d..6ec6f07e523 100644 --- a/source/isaaclab_tasks/docs/CHANGELOG.rst +++ b/source/isaaclab_tasks/docs/CHANGELOG.rst @@ -1,6 +1,23 @@ Changelog --------- +0.11.13 (2026-01-28) +~~~~~~~~~~~~~~~~~~~~ + +Changed +^^^^^^^ + +* Updated all task environments to use the new ``root_view`` property instead of the deprecated + ``root_physx_view`` property. 
This includes the following environments: + + * AutoMate Assembly and Disassembly environments + * Factory environments + * FORGE environments + * Inhand manipulation environments + * Quadcopter environments + * Shadow Hand environments + + 0.11.12 (2025-12-16) ~~~~~~~~~~~~~~~~~~~~ From 24f5d87576d1652f677040396cd829c94730441f Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 10:18:55 +0100 Subject: [PATCH 09/38] pre-commits. --- docs/source/api/index.rst | 2 +- docs/source/api/lab/isaaclab.assets.rst | 2 +- docs/source/api/lab_physx/isaaclab.assets.rst | 2 +- .../migration/migrating_to_isaaclab_3-0.rst | 1 - scripts/demos/pick_and_place.py | 2 +- .../01_assets/run_surface_gripper.py | 2 +- source/isaaclab/isaaclab/assets/__init__.py | 2 +- .../isaaclab/assets/articulation/__init__.py | 2 +- .../assets/articulation/base_articulation.py | 89 +- .../articulation/base_articulation_data.py | 22 +- .../isaaclab/assets/rigid_object/__init__.py | 2 +- .../assets/rigid_object/base_rigid_object.py | 22 +- .../rigid_object/base_rigid_object_data.py | 14 +- .../rigid_object_collection/__init__.py | 2 +- .../base_rigid_object_collection.py | 15 +- .../base_rigid_object_collection_data.py | 18 +- .../rigid_object_collection.py | 4 +- .../rigid_object_collection_data.py | 4 +- .../mdp/actions/surface_gripper_actions.py | 2 +- source/isaaclab/isaaclab/envs/mdp/events.py | 8 +- .../isaaclab/scene/interactive_scene.py | 4 +- .../isaaclab/sim/simulation_manager.py | 1004 ----------------- source/isaaclab_physx/docs/CHANGELOG.rst | 2 +- .../isaaclab_physx/assets/__init__.py | 2 +- .../assets/articulation/__init__.py | 2 +- .../assets/articulation/articulation.py | 31 +- .../assets/articulation/articulation_data.py | 75 +- .../assets/deformable_object/__init__.py | 2 +- .../deformable_object/deformable_object.py | 2 +- .../deformable_object_cfg.py | 2 +- .../deformable_object_data.py | 10 +- .../assets/rigid_object/__init__.py | 2 +- .../assets/rigid_object/rigid_object.py | 25 +- .../assets/rigid_object/rigid_object_data.py | 35 +- .../rigid_object_collection/__init__.py | 2 +- .../rigid_object_collection.py | 32 +- .../rigid_object_collection_data.py | 66 +- .../assets/surface_gripper/__init__.py | 2 +- .../surface_gripper/surface_gripper_cfg.py | 2 +- .../test/assets/test_articulation.py | 2 +- .../test/assets/test_deformable_object.py | 2 +- .../test/assets/test_rigid_object.py | 2 +- .../assets/test_rigid_object_collection.py | 8 +- .../test/assets/test_surface_gripper.py | 2 +- .../isaaclab_tasks/direct/forge/forge_env.py | 4 +- .../inhand_manipulation_env.py | 4 +- .../lift/config/franka/ik_abs_env_cfg.py | 1 + .../config/openarm/lift_openarm_env_cfg.py | 3 +- .../manipulation/lift/lift_env_cfg.py | 3 +- .../config/galbot/stack_joint_pos_env_cfg.py | 3 +- .../ur10_gripper/stack_joint_pos_env_cfg.py | 3 +- 51 files changed, 300 insertions(+), 1256 deletions(-) delete mode 100644 source/isaaclab/isaaclab/sim/simulation_manager.py diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst index 3d0e151cde5..be6e7230fad 100644 --- a/docs/source/api/index.rst +++ b/docs/source/api/index.rst @@ -105,4 +105,4 @@ The following modules are available in the ``isaaclab_physx`` extension: .. 
autosummary:: :toctree: lab_physx - assets \ No newline at end of file + assets diff --git a/docs/source/api/lab/isaaclab.assets.rst b/docs/source/api/lab/isaaclab.assets.rst index 5d8e6699b4d..28356330b60 100644 --- a/docs/source/api/lab/isaaclab.assets.rst +++ b/docs/source/api/lab/isaaclab.assets.rst @@ -89,4 +89,4 @@ Articulation :members: :inherited-members: :show-inheritance: - :exclude-members: __init__, class_type \ No newline at end of file + :exclude-members: __init__, class_type diff --git a/docs/source/api/lab_physx/isaaclab.assets.rst b/docs/source/api/lab_physx/isaaclab.assets.rst index dd5ee6148b7..dcd80a0aeb4 100644 --- a/docs/source/api/lab_physx/isaaclab.assets.rst +++ b/docs/source/api/lab_physx/isaaclab.assets.rst @@ -128,4 +128,4 @@ Surface Gripper :members: :inherited-members: :show-inheritance: - :exclude-members: __init__, class_type \ No newline at end of file + :exclude-members: __init__, class_type diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst index 4bc9194cf7f..627e78fd5c4 100644 --- a/docs/source/migration/migrating_to_isaaclab_3-0.rst +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -250,4 +250,3 @@ Here's a complete example showing how to update your code: collection = scene["object_collection"] poses = collection.data.body_pose_w collection.write_body_state_to_sim(state, env_ids=env_ids, body_ids=object_ids) - diff --git a/scripts/demos/pick_and_place.py b/scripts/demos/pick_and_place.py index 9c43b35ec9d..04516bf0a5b 100644 --- a/scripts/demos/pick_and_place.py +++ b/scripts/demos/pick_and_place.py @@ -26,6 +26,7 @@ from collections.abc import Sequence import torch +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg import carb import omni @@ -37,7 +38,6 @@ RigidObject, RigidObjectCfg, ) -from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg from isaaclab.markers import SPHERE_MARKER_CFG, VisualizationMarkers from isaaclab.scene import InteractiveSceneCfg diff --git a/scripts/tutorials/01_assets/run_surface_gripper.py b/scripts/tutorials/01_assets/run_surface_gripper.py index 64711041fe2..5ccf47b40a2 100644 --- a/scripts/tutorials/01_assets/run_surface_gripper.py +++ b/scripts/tutorials/01_assets/run_surface_gripper.py @@ -34,10 +34,10 @@ """Rest everything follows.""" import torch +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg import isaaclab.sim as sim_utils from isaaclab.assets import Articulation -from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.sim import SimulationContext ## diff --git a/source/isaaclab/isaaclab/assets/__init__.py b/source/isaaclab/isaaclab/assets/__init__.py index c25a8919453..3eacce88028 100644 --- a/source/isaaclab/isaaclab/assets/__init__.py +++ b/source/isaaclab/isaaclab/assets/__init__.py @@ -62,4 +62,4 @@ "RigidObjectCollection", "RigidObjectCollectionCfg", "RigidObjectCollectionData", -] \ No newline at end of file +] diff --git a/source/isaaclab/isaaclab/assets/articulation/__init__.py b/source/isaaclab/isaaclab/assets/articulation/__init__.py index 4e52538e2a3..792b7a5454a 100644 --- a/source/isaaclab/isaaclab/assets/articulation/__init__.py +++ b/source/isaaclab/isaaclab/assets/articulation/__init__.py @@ -17,4 +17,4 @@ "Articulation", "ArticulationCfg", "ArticulationData", -] \ No newline at end of file +] diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py 
b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index d62392b8248..2e8f941dfc1 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -8,12 +8,11 @@ from __future__ import annotations -import torch from abc import abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING -import warp as wp +import torch from ..asset_base import AssetBase @@ -224,9 +223,7 @@ def update(self, dt: float) -> None: """ @abstractmethod - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the articulation based on the name keys. Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more @@ -418,7 +415,8 @@ def write_root_velocity_to_sim( ..note:: This sets the velocity of the root's center of mass rather than the roots frame. Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or + (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -435,7 +433,8 @@ def write_root_com_velocity_to_sim( ..note:: This sets the velocity of the root's center of mass rather than the roots frame. Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or + (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -452,7 +451,8 @@ def write_root_link_velocity_to_sim( ..note:: This sets the velocity of the root's frame rather than the roots center of mass. Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or + (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -634,13 +634,13 @@ def write_joint_friction_coefficient_to_sim( to resist the joint motion. Args: - joint_friction_coeff: Joint static friction coefficient. Shape is (len(env_ids), len(joint_ids)) or (num_instances, num_joints). + joint_friction_coeff: Joint static friction coefficient. Shape is (len(env_ids), len(joint_ids)) or + (num_instances, num_joints). joint_ids: The joint indices to set the joint torque limits for. Defaults to None (all joints). env_ids: The environment indices to set the joint torque limits for. Defaults to None (all environments). """ raise NotImplementedError() - """ Operations - Setters. """ @@ -673,7 +673,8 @@ def set_coms( Args: coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). - env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). + env_ids: The environment indices to set the center of mass positions for. Defaults to None + (all environments). 
""" raise NotImplementedError() @@ -737,9 +738,12 @@ def set_external_force_and_torque( right before the simulation step. Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). - positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). Defaults to None. + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or + (num_instances, num_bodies, 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or + (num_instances, num_bodies, 3). + positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3) or + (num_instances, num_bodies, 3). Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, @@ -818,10 +822,12 @@ def set_fixed_tendon_stiffness( """Set fixed tendon stiffness into internal buffers. This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` function. + the desired values. To apply the tendon stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` + function. Args: - stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + stiffness: Fixed tendon stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all fixed tendons). env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). """ @@ -840,7 +846,8 @@ def set_fixed_tendon_damping( the desired values. To apply the tendon damping, call the :meth:`write_fixed_tendon_properties_to_sim` function. Args: - damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + damping: Fixed tendon damping. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the damping for. Defaults to None (all fixed tendons). env_ids: The environment indices to set the damping for. Defaults to None (all environments). """ @@ -856,10 +863,12 @@ def set_fixed_tendon_limit_stiffness( """Set fixed tendon limit stiffness efforts into internal buffers. This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` function. + the desired values. To apply the tendon limit stiffness, call the :meth:`write_fixed_tendon_properties_to_sim` + function. Args: - limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + limit_stiffness: Fixed tendon limit stiffness. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the limit stiffness for. 
Defaults to None (all fixed tendons). env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). """ @@ -878,7 +887,8 @@ def set_fixed_tendon_position_limit( the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function. Args: - limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). env_ids: The environment indices to set the limit for. Defaults to None (all environments). """ @@ -894,10 +904,12 @@ def set_fixed_tendon_rest_length( """Set fixed tendon rest length efforts into internal buffers. This function does not apply the tendon rest length to the simulation. It only fills the buffers with - the desired values. To apply the tendon rest length, call the :meth:`write_fixed_tendon_properties_to_sim` function. + the desired values. To apply the tendon rest length, call the :meth:`write_fixed_tendon_properties_to_sim` + function. Args: - rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + rest_length: Fixed tendon rest length. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the rest length for. Defaults to None (all fixed tendons). env_ids: The environment indices to set the rest length for. Defaults to None (all environments). """ @@ -916,7 +928,8 @@ def set_fixed_tendon_offset( the desired values. To apply the tendon offset, call the :meth:`write_fixed_tendon_properties_to_sim` function. Args: - offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). + offset: Fixed tendon offset. Shape is (len(env_ids), len(fixed_tendon_ids)) or + (num_instances, num_fixed_tendons). fixed_tendon_ids: The tendon indices to set the offset for. Defaults to None (all fixed tendons). env_ids: The environment indices to set the offset for. Defaults to None (all environments). """ @@ -946,10 +959,12 @@ def set_spatial_tendon_stiffness( """Set spatial tendon stiffness into internal buffers. This function does not apply the tendon stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` function. + the desired values. To apply the tendon stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` + function. Args: - stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + stiffness: Spatial tendon stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons). spatial_tendon_ids: The tendon indices to set the stiffness for. Defaults to None (all spatial tendons). env_ids: The environment indices to set the stiffness for. Defaults to None (all environments). """ @@ -965,11 +980,13 @@ def set_spatial_tendon_damping( """Set spatial tendon damping into internal buffers. This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` function. + the desired values. 
To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` + function. Args: - damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons). spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). env_ids: The environment indices to set the damping for. Defaults to None (all environments). """ @@ -985,11 +1002,14 @@ def set_spatial_tendon_limit_stiffness( """Set spatial tendon limit stiffness into internal buffers. This function does not apply the tendon limit stiffness to the simulation. It only fills the buffers with - the desired values. To apply the tendon limit stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` function. + the desired values. To apply the tendon limit stiffness, call the :meth:`write_spatial_tendon_properties_to_sim` + function. Args: - limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). - spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None (all spatial tendons). + limit_stiffness: Spatial tendon limit stiffness. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons). + spatial_tendon_ids: The tendon indices to set the limit stiffness for. Defaults to None + (all spatial tendons). env_ids: The environment indices to set the limit stiffness for. Defaults to None (all environments). """ raise NotImplementedError() @@ -1004,10 +1024,12 @@ def set_spatial_tendon_offset( """Set spatial tendon offset efforts into internal buffers. This function does not apply the tendon offset to the simulation. It only fills the buffers with - the desired values. To apply the tendon offset, call the :meth:`write_spatial_tendon_properties_to_sim` function. + the desired values. To apply the tendon offset, call the :meth:`write_spatial_tendon_properties_to_sim` + function. Args: - offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + offset: Spatial tendon offset. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons). spatial_tendon_ids: The tendon indices to set the offset for. Defaults to None (all spatial tendons). env_ids: The environment indices to set the offset for. Defaults to None (all environments). """ @@ -1022,7 +1044,8 @@ def write_spatial_tendon_properties_to_sim( """Write spatial tendon properties into the simulation. Args: - spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None (all spatial tendons). + spatial_tendon_ids: The spatial tendon indices to set the properties for. Defaults to None + (all spatial tendons). env_ids: The environment indices to set the properties for. Defaults to None (all environments). 
""" raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index dfcc646a8e1..323692df243 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -63,28 +63,29 @@ def update(self, dt: float) -> None: @property @abstractmethod def default_root_pose(self) -> torch.Tensor: - """Default root pose ``[pos, quat]`` in the local environment frame. Shape is (num_instances, 7). + """Default root pose ``[pos, quat]`` in the local environment frame. - The position and quaternion are of the articulation root's actor frame. + The position and quaternion are of the articulation root's actor frame. Shape is (num_instances, 7). """ raise NotImplementedError @property @abstractmethod def default_root_vel(self) -> torch.Tensor: - """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 6). + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. The linear and angular velocities are of the articulation root's center of mass frame. + Shape is (num_instances, 6). """ raise NotImplementedError @property @abstractmethod def default_root_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 13). + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular - velocities are of its center of mass frame. + velocities are of its center of mass frame. Shape is (num_instances, 13). This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. """ @@ -414,11 +415,11 @@ def root_link_state_w(self) -> torch.Tensor: @property @abstractmethod def root_com_state_w(self) -> torch.Tensor: - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the - orientation of the principle inertia. + orientation of the principle inertia. Shape is (num_instances, 13). """ raise NotImplementedError @@ -544,8 +545,11 @@ def body_incoming_joint_wrench_b(self) -> torch.Tensor: Shape is (num_instances, num_bodies, 6). All body reaction wrenches are provided including the root body to the world of an articulation. - For more information on joint wrenches, please check the`PhysX documentation `__ - and the underlying `PhysX Tensor API `__ . + For more information on joint wrenches, please check the `PhysX documentation`_ and the + underlying `PhysX Tensor API`_. + + .. _PhysX documentation: https://nvidia-omniverse.github.io/PhysX/physx/5.5.1/docs/Articulations.html#link-incoming-joint-force + .. 
_PhysX Tensor API: https://docs.omniverse.nvidia.com/kit/docs/omni_physics/latest/extensions/runtime/source/omni.physics.tensors/docs/api/python.html#omni.physics.tensors.impl.api.ArticulationView.get_link_incoming_joint_force """ raise NotImplementedError diff --git a/source/isaaclab/isaaclab/assets/rigid_object/__init__.py b/source/isaaclab/isaaclab/assets/rigid_object/__init__.py index 5bc38dcb171..9fdde709c8b 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/__init__.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/__init__.py @@ -17,4 +17,4 @@ "RigidObject", "RigidObjectCfg", "RigidObjectData", -] \ No newline at end of file +] diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py index c6268deea65..7308bceea92 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py @@ -5,11 +5,12 @@ from __future__ import annotations -import torch from abc import abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING +import torch + from isaaclab.utils.wrench_composer import WrenchComposer from ..asset_base import AssetBase @@ -142,9 +143,7 @@ def update(self, dt: float) -> None: """ @abstractmethod - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the rigid body based on the name keys. Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more @@ -275,7 +274,8 @@ def write_root_velocity_to_sim( ..note:: This sets the velocity of the root's center of mass rather than the roots frame. Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) + or (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -292,7 +292,8 @@ def write_root_com_velocity_to_sim( ..note:: This sets the velocity of the root's center of mass rather than the roots frame. Args: - root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root center of mass velocities in simulation world frame. Shape is (len(env_ids), 6) + or (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -309,7 +310,8 @@ def write_root_link_velocity_to_sim( ..note:: This sets the velocity of the root's frame rather than the roots center of mass. Args: - root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) or (num_instances, 6). + root_velocity: Root frame velocities in simulation world frame. Shape is (len(env_ids), 6) + or (num_instances, 6). env_ids: Environment indices. If None, then all indices are used. """ raise NotImplementedError() @@ -346,7 +348,8 @@ def set_coms( Args: coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). - env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). 
+ env_ids: The environment indices to set the center of mass positions for. Defaults to None + (all environments). """ raise NotImplementedError() @@ -412,7 +415,8 @@ def set_external_force_and_torque( Args: forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index fe05cfc4d7b..4d8a16151a6 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -61,28 +61,28 @@ def update(self, dt: float) -> None: @property @abstractmethod def default_root_pose(self) -> torch.Tensor: - """Default root pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, 7). + """Default root pose ``[pos, quat]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. + The position and quaternion are of the rigid body's actor frame. Shape is (num_instances, 7). """ raise NotImplementedError() @property @abstractmethod def default_root_vel(self) -> torch.Tensor: - """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). + """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. - The linear and angular velocities are of the rigid body's center of mass frame. + The linear and angular velocities are of the rigid body's center of mass frame. Shape is (num_instances, 6). """ raise NotImplementedError() @property @abstractmethod def default_root_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, 13). 
""" raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py index 53eff6d98fc..f0fd26363a0 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/__init__.py @@ -17,4 +17,4 @@ "RigidObjectCollection", "RigidObjectCollectionCfg", "RigidObjectCollectionData", -] \ No newline at end of file +] diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index 55246ed5625..13b1c263e84 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -5,11 +5,12 @@ from __future__ import annotations -import torch from abc import abstractmethod from collections.abc import Sequence from typing import TYPE_CHECKING +import torch + from isaaclab.utils.wrench_composer import WrenchComposer from ..asset_base import AssetBase @@ -172,7 +173,8 @@ def write_body_state_to_sim( """Set the bodies state over selected environment indices into the simulation. The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear - and angular velocity. All the quantities are in the simulation frame. Shape is (len(env_ids), len(body_ids), 13). + and angular velocity. All the quantities are in the simulation frame. Shape is + (len(env_ids), len(body_ids), 13). Args: body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). @@ -306,7 +308,8 @@ def write_body_com_velocity_to_sim( ..note:: This sets the velocity of the body's center of mass rather than the body's frame. Args: - body_velocities: Body center of mass velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6). + body_velocities: Body center of mass velocities in simulation frame. Shape is + (len(env_ids), len(body_ids), 6). env_ids: Environment indices. If None, then all indices are used. body_ids: Body indices. If None, then all indices are used. """ @@ -363,7 +366,8 @@ def set_coms( Args: coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). - env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). + env_ids: The environment indices to set the center of mass positions for. Defaults to None + (all environments). """ raise NotImplementedError() @@ -429,7 +433,8 @@ def set_external_force_and_torque( Args: forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. 
If set to False, diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 785e9579de4..8f1ed6f6f4a 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -63,28 +63,30 @@ def update(self, dt: float) -> None: @property @abstractmethod def default_body_pose(self) -> torch.Tensor: - """Default body pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, num_bodies, 7). + """Default body pose ``[pos, quat]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. + The position and quaternion are of the rigid body's actor frame. Shape is (num_instances, num_bodies, 7). """ raise NotImplementedError() @property @abstractmethod def default_body_vel(self) -> torch.Tensor: - """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 6). + """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. - The linear and angular velocities are of the rigid body's center of mass frame. + + The linear and angular velocities are of the rigid body's center of mass frame. Shape is + (num_instances, num_bodies, 6). """ raise NotImplementedError() @property @abstractmethod def default_body_state(self) -> torch.Tensor: - """Default body state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 13). + """Default body state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, num_bodies, 13). """ raise NotImplementedError() @@ -373,4 +375,4 @@ def body_com_quat_b(self) -> torch.Tensor: This quantity is the orientation of the principles axes of inertia relative to its body's link frame.
""" - raise NotImplementedError() \ No newline at end of file + raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py index ce8df64a64a..21071e3d696 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection.py @@ -14,7 +14,9 @@ if TYPE_CHECKING: from isaaclab_physx.assets.rigid_object_collection import RigidObjectCollection as PhysXRigidObjectCollection - from isaaclab_physx.assets.rigid_object_collection import RigidObjectCollectionData as PhysXRigidObjectCollectionData + from isaaclab_physx.assets.rigid_object_collection import ( + RigidObjectCollectionData as PhysXRigidObjectCollectionData, + ) class RigidObjectCollection(FactoryBase, BaseRigidObjectCollection): diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py index 0e15a3eebfa..c9ca2f7ccad 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/rigid_object_collection_data.py @@ -12,7 +12,9 @@ from .base_rigid_object_collection_data import BaseRigidObjectCollectionData if TYPE_CHECKING: - from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import RigidObjectCollectionData as PhysXRigidObjectCollectionData + from isaaclab_physx.assets.rigid_object_collection.rigid_object_collection_data import ( + RigidObjectCollectionData as PhysXRigidObjectCollectionData, + ) class RigidObjectCollectionData(FactoryBase): diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py index 7de29172a33..d2cd9ab581a 100644 --- a/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py +++ b/source/isaaclab/isaaclab/envs/mdp/actions/surface_gripper_actions.py @@ -10,8 +10,8 @@ from typing import TYPE_CHECKING import torch - from isaaclab_physx.assets import SurfaceGripper + from isaaclab.managers.action_manager import ActionTerm if TYPE_CHECKING: diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 6c4b0df5d0a..b8af797833f 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -20,6 +20,7 @@ from typing import TYPE_CHECKING, Literal import torch +from isaaclab_physx.assets import DeformableObject import carb import omni.physics.tensors.impl.api as physx @@ -30,7 +31,6 @@ import isaaclab.utils.math as math_utils from isaaclab.actuators import ImplicitActuator from isaaclab.assets import Articulation, RigidObject -from isaaclab_physx.assets import DeformableObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.sim.utils.stage import get_current_stage from isaaclab.terrains import TerrainImporter @@ -574,7 +574,7 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): self.default_joint_stiffness = self.asset.data.joint_stiffness.clone() self.default_joint_damping = self.asset.data.joint_damping.clone() - + # check for valid operation if cfg.params["operation"] == "scale": if "stiffness_distribution_params" in cfg.params: @@ -635,9 +635,7 @@ def 
randomize(data: torch.Tensor, params: tuple[float, float]) -> torch.Tensor: # Randomize stiffness if stiffness_distribution_params is not None: stiffness = actuator.stiffness[env_ids].clone() - stiffness[:, actuator_indices] = self.default_joint_stiffness[env_ids][ - :, global_indices - ].clone() + stiffness[:, actuator_indices] = self.default_joint_stiffness[env_ids][:, global_indices].clone() randomize(stiffness, stiffness_distribution_params) actuator.stiffness[env_ids] = stiffness if isinstance(actuator, ImplicitActuator): diff --git a/source/isaaclab/isaaclab/scene/interactive_scene.py b/source/isaaclab/isaaclab/scene/interactive_scene.py index 3d615bdfef9..b0837e68b11 100644 --- a/source/isaaclab/isaaclab/scene/interactive_scene.py +++ b/source/isaaclab/isaaclab/scene/interactive_scene.py @@ -8,6 +8,7 @@ from typing import Any import torch +from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg, SurfaceGripper, SurfaceGripperCfg import carb from isaacsim.core.cloner import GridCloner @@ -23,9 +24,6 @@ RigidObjectCollection, RigidObjectCollectionCfg, ) -from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg -from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg - from isaaclab.sensors import ContactSensorCfg, FrameTransformerCfg, SensorBase, SensorBaseCfg, VisuoTactileSensorCfg from isaaclab.sim import SimulationContext from isaaclab.sim.utils.stage import get_current_stage, get_current_stage_id diff --git a/source/isaaclab/isaaclab/sim/simulation_manager.py b/source/isaaclab/isaaclab/sim/simulation_manager.py deleted file mode 100644 index d1687db4fb8..00000000000 --- a/source/isaaclab/isaaclab/sim/simulation_manager.py +++ /dev/null @@ -1,1004 +0,0 @@ -# SPDX-FileCopyrightText: Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# SPDX-License-Identifier: Apache-2.0 -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import weakref -from collections import OrderedDict - -import carb -import isaacsim.core.utils.numpy as np_utils -import isaacsim.core.utils.torch as torch_utils -import isaacsim.core.utils.warp as warp_utils -import omni.kit -import omni.physx -import omni.timeline -import omni.usd -from isaacsim.core.utils.prims import get_prim_at_path, is_prim_path_valid -from isaacsim.core.utils.stage import get_current_stage, get_current_stage_id -from pxr import PhysxSchema - -from .isaac_events import IsaacEvents - - -class SimulationManager: - """This class provide functions that take care of many time-related events such as - warm starting simulation in order for the physics data to be retrievable. 
- Adding/ removing callback functions that gets triggered with certain events such as a physics step, - on post reset, on physics ready..etc.""" - - _warmup_needed = True - #_timeline = omni.timeline.get_timeline_interface() - _message_bus = carb.eventdispatcher.get_eventdispatcher() - _physx_sim_interface = omni.physx.get_physx_simulation_interface() - _physx_interface = omni.physx.get_physx_interface() - _physics_sim_view = None - _physics_sim_view__warp = None - _backend = "torch" - _carb_settings = carb.settings.get_settings() - _physics_scene_apis = OrderedDict() - #_callbacks = dict() - _simulation_manager_interface = None - _simulation_view_created = False - #_assets_loaded = True - #_assets_loading_callback = None - #_assets_loaded_callback = None - _default_physics_scene_idx = -1 - - # callback handles - #_warm_start_callback = None - #_on_stop_callback = None - #_post_warm_start_callback = None - #_stage_open_callback = None - - # Add callback state tracking - #_callbacks_enabled = { - # "warm_start": True, - # "on_stop": True, - # "post_warm_start": True, - # "stage_open": True, - #} - - @classmethod - def _initialize(cls) -> None: - # Initialize all callbacks as enabled by default - SimulationManager.enable_all_default_callbacks(True) - SimulationManager._track_physics_scenes() - - # ------------------------------------------------------------------------------------------------------------------ - # TODO: Removing this as the callbacks handling are moved to the SimulationContext class. - #@classmethod - #def _setup_warm_start_callback(cls) -> None: - # if cls._callbacks_enabled["warm_start"] and cls._warm_start_callback is None: - # cls._warm_start_callback = cls._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( - # int(omni.timeline.TimelineEventType.PLAY), cls._warm_start - # ) - - #@classmethod - #def _setup_on_stop_callback(cls) -> None: - # if cls._callbacks_enabled["on_stop"] and cls._on_stop_callback is None: - # cls._on_stop_callback = cls._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( - # int(omni.timeline.TimelineEventType.STOP), cls._on_stop - # ) - - #@classmethod - #def _setup_post_warm_start_callback(cls) -> None: - # if cls._callbacks_enabled["post_warm_start"] and cls._post_warm_start_callback is None: - # cls._post_warm_start_callback = cls._message_bus.observe_event( - # event_name=IsaacEvents.PHYSICS_WARMUP.value, - # on_event=cls._create_simulation_view, - # observer_name="SimulationManager._post_warm_start_callback", - # ) - - #@classmethod - #def _setup_stage_open_callback(cls) -> None: - # if cls._callbacks_enabled["stage_open"] and cls._stage_open_callback is None: - # cls._stage_open_callback = cls._message_bus.observe_event( - # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.OPENED), - # on_event=cls._post_stage_open, - # observer_name="SimulationManager._stage_open_callback", - # ) - - @classmethod - def _clear(cls) -> None: - # Use callback management system for main callbacks - #cls.enable_all_default_callbacks(False) - - # Handle additional cleanup not covered by enable_all_default_callbacks - cls._physics_sim_view = None - cls._physics_sim_view__warp = None - cls._assets_loading_callback = None - cls._assets_loaded_callback = None - cls._simulation_manager_interface.reset() - cls._physics_scene_apis.clear() - cls._callbacks.clear() - - def _post_stage_open(event) -> None: - SimulationManager._simulation_manager_interface.reset() - 
SimulationManager._physics_scene_apis.clear() - SimulationManager._callbacks.clear() - SimulationManager._track_physics_scenes() - SimulationManager._assets_loaded = True - SimulationManager._assets_loading_callback = None - SimulationManager._assets_loaded_callback = None - - # TODO: This should not be needed as we should be synchronous. - #def _assets_loading(event): - # SimulationManager._assets_loaded = False - - #def _assets_loaded(event): - # SimulationManager._assets_loaded = True - - #SimulationManager._assets_loading_callback = SimulationManager._message_bus.observe_event( - # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.ASSETS_LOADING), - # on_event=_assets_loading, - # observer_name="SimulationManager._assets_loading_callback", - #) - - #SimulationManager._assets_loaded_callback = SimulationManager._message_bus.observe_event( - # event_name=omni.usd.get_context().stage_event_name(omni.usd.StageEventType.ASSETS_LOADED), - # on_event=_assets_loaded, - # observer_name="SimulationManager._assets_loaded_callback", - #) - - # ------------------------------------------------------------------------------------------------------------------ - # TODO: This is very USD centric, we should not need this. Also we should have only one physics scene. - #def _track_physics_scenes() -> None: - # def add_physics_scenes(physics_scene_prim_path): - # prim = get_prim_at_path(physics_scene_prim_path) - # if prim.GetTypeName() == "PhysicsScene": - # SimulationManager._physics_scene_apis[physics_scene_prim_path] = PhysxSchema.PhysxSceneAPI.Apply(prim) - - # def remove_physics_scenes(physics_scene_prim_path): - # # TODO: match physics scene prim path - # if physics_scene_prim_path in SimulationManager._physics_scene_apis: - # del SimulationManager._physics_scene_apis[physics_scene_prim_path] - - # SimulationManager._simulation_manager_interface.register_physics_scene_addition_callback(add_physics_scenes) - # SimulationManager._simulation_manager_interface.register_deletion_callback(remove_physics_scenes) - - def _warm_start(event) -> None: - if SimulationManager._carb_settings.get_as_bool("/app/player/playSimulations"): - SimulationManager.initialize_physics() - - def _on_stop(event) -> None: - SimulationManager._warmup_needed = True - if SimulationManager._physics_sim_view: - SimulationManager._physics_sim_view.invalidate() - SimulationManager._physics_sim_view = None - SimulationManager._simulation_view_created = False - if SimulationManager._physics_sim_view__warp: - SimulationManager._physics_sim_view__warp.invalidate() - SimulationManager._physics_sim_view__warp = None - - def _create_simulation_view(event) -> None: - if "cuda" in SimulationManager.get_physics_sim_device() and SimulationManager._backend == "numpy": - SimulationManager._backend = "torch" - carb.log_warn("changing backend from numpy to torch since numpy backend cannot be used with GPU piplines") - SimulationManager._physics_sim_view = omni.physics.tensors.create_simulation_view( - SimulationManager.get_backend(), stage_id=get_current_stage_id() - ) - SimulationManager._physics_sim_view.set_subspace_roots("/") - #SimulationManager._physics_sim_view__warp = omni.physics.tensors.create_simulation_view( - # "warp", stage_id=get_current_stage_id() - #) - #SimulationManager._physics_sim_view__warp.set_subspace_roots("/") - SimulationManager._physx_interface.update_simulation(SimulationManager.get_physics_dt(), 0.0) - SimulationManager._message_bus.dispatch_event(IsaacEvents.SIMULATION_VIEW_CREATED.value, 
payload={}) - SimulationManager._simulation_view_created = True - SimulationManager._message_bus.dispatch_event(IsaacEvents.PHYSICS_READY.value, payload={}) - - # ------------------------------------------------------------------------------------------------------------------ - # TODO: Removing this as we should use our internal tools. - #@classmethod - #def _get_backend_utils(cls) -> str: - # if SimulationManager._backend == "numpy": - # return np_utils - # elif SimulationManager._backend == "torch": - # return torch_utils - # elif SimulationManager._backend == "warp": - # return warp_utils - # else: - # raise Exception( - # f"Provided backend is not supported: {SimulationManager.get_backend()}. Supported: torch, numpy, warp." - # ) - - # ------------------------------------------------------------------------------------------------------------------ - # TODO: This is very USD centric, we should not need this. Also we should have only one physics scene. - #@classmethod - #def _get_physics_scene_api(cls, physics_scene: str = None): - # if physics_scene is None: - # if len(SimulationManager._physics_scene_apis) > 0: - # physics_scene_api = list(SimulationManager._physics_scene_apis.values())[ - # SimulationManager._default_physics_scene_idx - # ] - # else: - # # carb.log_warn("Physics scene is not found in stage") - # return None - # else: - # if physics_scene in SimulationManager._physics_scene_apis: - # physics_scene_api = SimulationManager._physics_scene_apis[physics_scene] - # else: - # carb.log_warn("physics scene specified {} doesn't exist".format(physics_scene)) - # return None - # return physics_scene_api - - @classmethod - def set_backend(cls, val: str) -> None: - SimulationManager._backend = val - - @classmethod - def get_backend(cls) -> str: - return SimulationManager._backend - - @classmethod - def initialize_physics(cls) -> None: - if SimulationManager._warmup_needed: - SimulationManager._physx_interface.force_load_physics_from_usd() - SimulationManager._physx_interface.start_simulation() - SimulationManager._physx_interface.update_simulation(SimulationManager.get_physics_dt(), 0.0) - SimulationManager._physx_sim_interface.fetch_results() - SimulationManager._message_bus.dispatch_event(IsaacEvents.PHYSICS_WARMUP.value, payload={}) - SimulationManager._warmup_needed = False - - # TODO: Should handle simulation time itself. - @classmethod - def get_simulation_time(cls): - #return SimulationManager._simulation_manager_interface.get_simulation_time() - return self._simulation_time - - # TODO: Should handle simulation time itself. - @classmethod - def get_num_physics_steps(cls): - return SimulationManager._simulation_manager_interface.get_num_physics_steps() - - # TODO: Doesn't need to know if the simulation is simulating or not. - #@classmethod - #def is_simulating(cls): - # return SimulationManager._simulation_manager_interface.is_simulating() - - # TODO: Doesn't need to know if the simulation is paused or not. 
- #@classmethod - #def is_paused(cls): - # return SimulationManager._simulation_manager_interface.is_paused() - - @classmethod - def get_physics_sim_view(cls): - return SimulationManager._physics_sim_view - - @classmethod - def set_default_physics_scene(cls, physics_scene_prim_path: str): - if SimulationManager._warm_start_callback is None: - carb.log_warn("Calling set_default_physics_scene while SimulationManager is not tracking physics scenes") - return - if physics_scene_prim_path in SimulationManager._physics_scene_apis: - SimulationManager._default_physics_scene_idx = list(SimulationManager._physics_scene_apis.keys()).index( - physics_scene_prim_path - ) - elif is_prim_path_valid(physics_scene_prim_path): - prim = get_prim_at_path(physics_scene_prim_path) - if prim.GetTypeName() == "PhysicsScene": - SimulationManager._physics_scene_apis[physics_scene_prim_path] = PhysxSchema.PhysxSceneAPI.Apply(prim) - SimulationManager._default_physics_scene_idx = list(SimulationManager._physics_scene_apis.keys()).index( - physics_scene_prim_path - ) - else: - raise Exception("physics scene specified {} doesn't exist".format(physics_scene_prim_path)) - - @classmethod - def get_default_physics_scene(cls) -> str: - if len(SimulationManager._physics_scene_apis) > 0: - return list(SimulationManager._physics_scene_apis.keys())[SimulationManager._default_physics_scene_idx] - else: - carb.log_warn("No physics scene is found in stage") - return None - - @classmethod - def step(cls, render: bool = False): - if render: - raise Exception( - "Stepping the renderer is not supported at the moment through SimulationManager, use SimulationContext instead." - ) - SimulationManager._physx_sim_interface.simulate( - SimulationManager.get_physics_dt(physics_scene=None), SimulationManager.get_simulation_time() - ) - SimulationManager._physx_sim_interface.fetch_results() - - @classmethod - def set_physics_sim_device(cls, val) -> None: - if "cuda" in val: - parsed_device = val.split(":") - if len(parsed_device) == 1: - device_id = SimulationManager._carb_settings.get_as_int("/physics/cudaDevice") - if device_id < 0: - SimulationManager._carb_settings.set_int("/physics/cudaDevice", 0) - device_id = 0 - else: - SimulationManager._carb_settings.set_int("/physics/cudaDevice", int(parsed_device[1])) - SimulationManager._carb_settings.set_bool("/physics/suppressReadback", True) - SimulationManager.set_broadphase_type("GPU") - SimulationManager.enable_gpu_dynamics(flag=True) - SimulationManager.enable_fabric(enable=True) - elif "cpu" == val.lower(): - SimulationManager._carb_settings.set_bool("/physics/suppressReadback", False) - # SimulationManager._carb_settings.set_int("/physics/cudaDevice", -1) - SimulationManager.set_broadphase_type("MBP") - SimulationManager.enable_gpu_dynamics(flag=False) - else: - raise Exception("Device {} is not supported.".format(val)) - - @classmethod - def get_physics_sim_device(cls) -> str: - supress_readback = SimulationManager._carb_settings.get_as_bool("/physics/suppressReadback") - if (not SimulationManager._physics_scene_apis and supress_readback) or ( - supress_readback - and SimulationManager.get_broadphase_type() == "GPU" - and SimulationManager.is_gpu_dynamics_enabled() - ): - device_id = SimulationManager._carb_settings.get_as_int("/physics/cudaDevice") - if device_id < 0: - SimulationManager._carb_settings.set_int("/physics/cudaDevice", 0) - device_id = 0 - return f"cuda:{device_id}" - else: - return "cpu" - - def set_physics_dt(cls, dt: float = 1.0 / 60.0, physics_scene: str = None) -> 
None: - """Sets the physics dt on the physics scene provided. - - Args: - dt (float, optional): physics dt. Defaults to 1.0/60.0. - physics_scene (str, optional): physics scene prim path. Defaults to first physics scene found in the stage. - - Raises: - Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - ValueError: Physics dt must be a >= 0. - ValueError: Physics dt must be a <= 1.0. - """ - if physics_scene is None: - physics_scene_apis = SimulationManager._physics_scene_apis.values() - else: - physics_scene_apis = [SimulationManager._get_physics_scene_api(physics_scene=physics_scene)] - - for physics_scene_api in physics_scene_apis: - if dt < 0: - raise ValueError("physics dt cannot be <0") - # if no stage or no change in physics timestep, exit. - if get_current_stage() is None: - return - if dt == 0: - physics_scene_api.GetTimeStepsPerSecondAttr().Set(0) - elif dt > 1.0: - raise ValueError("physics dt must be <= 1.0") - else: - steps_per_second = int(1.0 / dt) - physics_scene_api.GetTimeStepsPerSecondAttr().Set(steps_per_second) - return - - @classmethod - def get_physics_dt(cls, physics_scene: str = None) -> str: - """ - Returns the current physics dt. - - Args: - physics_scene (str, optional): physics scene prim path. - - Raises: - Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - Returns: - float: physics dt. - """ - physics_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - if physics_scene_api is None: - return 1.0 / 60.0 - physics_hz = physics_scene_api.GetTimeStepsPerSecondAttr().Get() - if physics_hz == 0: - return 0.0 - else: - return 1.0 / physics_hz - - # ------------------------------------------------------------------------------------------------------------------ - # TODO: Removing this as we will only set the settings on initialization. - #@classmethod - #def get_broadphase_type(cls, physics_scene: str = None) -> str: - # """Gets current broadcast phase algorithm type. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # str: Broadcast phase algorithm used. - # """ - # physics_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physics_scene_api.GetBroadphaseTypeAttr().Get() - - #@classmethod - #def set_broadphase_type(cls, val: str, physics_scene: str = None) -> None: - # """Broadcast phase algorithm used in simulation. - - # Args: - # val (str): type of broadcasting to be used, can be "MBP". - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
- # """ - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetBroadphaseTypeAttr().Get() is None: - # physx_scene_api.CreateBroadphaseTypeAttr(val) - # else: - # physx_scene_api.GetBroadphaseTypeAttr().Set(val) - # else: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # if physx_scene_api.GetBroadphaseTypeAttr().Get() is None: - # physx_scene_api.CreateBroadphaseTypeAttr(val) - # else: - # physx_scene_api.GetBroadphaseTypeAttr().Set(val) - - #@classmethod - #def enable_ccd(cls, flag: bool, physics_scene: str = None) -> None: - # """Enables a second broad phase after integration that makes it possible to prevent objects from tunneling - # through each other. - - # Args: - # flag (bool): enables or disables ccd on the PhysicsScene - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - # """ - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetEnableCCDAttr().Get() is None: - # physx_scene_api.CreateEnableCCDAttr(flag) - # else: - # physx_scene_api.GetEnableCCDAttr().Set(flag) - # else: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # if physx_scene_api.GetEnableCCDAttr().Get() is None: - # physx_scene_api.CreateEnableCCDAttr(flag) - # else: - # physx_scene_api.GetEnableCCDAttr().Set(flag) - - #@classmethod - #def is_ccd_enabled(cls, physics_scene: str = None) -> bool: - # """Checks if ccd is enabled. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # bool: True if ccd is enabled, otherwise False. - # """ - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physx_scene_api.GetEnableCCDAttr().Get() - - #@classmethod - #def enable_ccd(cls, flag: bool, physics_scene: str = None) -> None: - # """Enables Continuous Collision Detection (CCD). - - # Args: - # flag (bool): enables or disables CCD on the PhysicsScene. - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
- # """ - # if flag and "cuda" in SimulationManager.get_physics_sim_device(): - # carb.log_warn("CCD is not supported on GPU, ignoring request to enable it") - # return - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetEnableCCDAttr().Get() is None: - # physx_scene_api.CreateEnableCCDAttr(flag) - # else: - # physx_scene_api.GetEnableCCDAttr().Set(flag) - # else: - # if physics_scene in SimulationManager._physics_scene_apis: - # physx_scene_api = SimulationManager._physics_scene_apis[physics_scene] - # if physx_scene_api.GetEnableCCDAttr().Get() is None: - # physx_scene_api.CreateEnableCCDAttr(flag) - # else: - # physx_scene_api.GetEnableCCDAttr().Set(flag) - # else: - # raise Exception("physics scene specified {} doesn't exist".format(physics_scene)) - - #@classmethod - #def is_ccd_enabled(cls, physics_scene: str = None) -> bool: - # """Checks if Continuous Collision Detection (CCD) is enabled. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # bool: True if CCD is enabled, otherwise False. - # """ - # if physics_scene is None: - # if len(SimulationManager._physics_scene_apis) > 0: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physx_scene_api.GetEnableCCDAttr().Get() - # else: - # return False - # else: - # if physics_scene in SimulationManager._physics_scene_apis: - # physx_scene_api = SimulationManager._physics_scene_apis[physics_scene] - # return physx_scene_api.GetEnableCCDAttr().Get() - # else: - # raise Exception("physics scene specified {} doesn't exist".format(physics_scene)) - - #@classmethod - #def enable_gpu_dynamics(cls, flag: bool, physics_scene: str = None) -> None: - # """Enables gpu dynamics pipeline, required for deformables for instance. - - # Args: - # flag (bool): enables or disables gpu dynamics on the PhysicsScene. If flag is true, CCD is disabled. - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. 
- # """ - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetEnableGPUDynamicsAttr().Get() is None: - # physx_scene_api.CreateEnableGPUDynamicsAttr(flag) - # else: - # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) - # # Disable CCD for GPU dynamics as its not supported - # if flag: - # if SimulationManager.is_ccd_enabled(): - # carb.log_warn("Disabling CCD for GPU dynamics as its not supported") - # SimulationManager.enable_ccd(flag=False) - # else: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # if physx_scene_api.GetEnableGPUDynamicsAttr().Get() is None: - # physx_scene_api.CreateEnableGPUDynamicsAttr(flag) - # else: - # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) - # # Disable CCD for GPU dynamics as its not supported - # if flag: - # if SimulationManager.is_ccd_enabled(physics_scene=physics_scene): - # carb.log_warn("Disabling CCD for GPU dynamics as its not supported") - # SimulationManager.enable_ccd(flag=False, physics_scene=physics_scene) - # else: - # physx_scene_api.GetEnableGPUDynamicsAttr().Set(flag) - - #@classmethod - #def is_gpu_dynamics_enabled(cls, physics_scene: str = None) -> bool: - # """Checks if Gpu Dynamics is enabled. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # bool: True if Gpu Dynamics is enabled, otherwise False. - # """ - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physx_scene_api.GetEnableGPUDynamicsAttr().Get() - - # ------------------------------------------------------------------------------------------------------------------ - - @classmethod - def enable_fabric(cls, enable): - """Enables or disables physics fabric integration and associated settings. - - Args: - enable: Whether to enable or disable fabric. - """ - manager = omni.kit.app.get_app().get_extension_manager() - fabric_was_enabled = manager.is_extension_enabled("omni.physx.fabric") - if not fabric_was_enabled and enable: - manager.set_extension_enabled_immediate("omni.physx.fabric", True) - elif fabric_was_enabled and not enable: - manager.set_extension_enabled_immediate("omni.physx.fabric", False) - SimulationManager._carb_settings.set_bool("/physics/updateToUsd", not enable) - SimulationManager._carb_settings.set_bool("/physics/updateParticlesToUsd", not enable) - SimulationManager._carb_settings.set_bool("/physics/updateVelocitiesToUsd", not enable) - SimulationManager._carb_settings.set_bool("/physics/updateForceSensorsToUsd", not enable) - - @classmethod - def is_fabric_enabled(cls, enable): - """Checks if fabric is enabled. - - Args: - enable: Whether to check if fabric is enabled. - - Returns: - bool: True if fabric is enabled, otherwise False. - """ - return omni.kit.app.get_app().get_extension_manager().is_extension_enabled("omni.physx.fabric") - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing this as we will only set the settings on initialization. - #@classmethod - #def set_solver_type(cls, solver_type: str, physics_scene: str = None) -> None: - # """solver used for simulation. - - # Args: - # solver_type (str): can be "TGS" or "PGS". 
- # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - # """ - # if solver_type not in ["TGS", "PGS"]: - # raise Exception("Solver type {} is not supported".format(solver_type)) - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetSolverTypeAttr().Get() is None: - # physx_scene_api.CreateSolverTypeAttr(solver_type) - # else: - # physx_scene_api.GetSolverTypeAttr().Set(solver_type) - # else: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # if physx_scene_api.GetSolverTypeAttr().Get() is None: - # physx_scene_api.CreateSolverTypeAttr(solver_type) - # else: - # physx_scene_api.GetSolverTypeAttr().Set(solver_type) - - #@classmethod - #def get_solver_type(cls, physics_scene: str = None) -> str: - # """Gets current solver type. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # str: solver used for simulation. - # """ - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physx_scene_api.GetSolverTypeAttr().Get() - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing this, this is oddly specific, we will use string sets on initialization. - #@classmethod - #def enable_stablization(cls, flag: bool, physics_scene: str = None) -> None: - # """Enables additional stabilization pass in the solver. - - # Args: - # flag (bool): enables or disables stabilization on the PhysicsScene - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - # """ - # if physics_scene is None: - # for path, physx_scene_api in SimulationManager._physics_scene_apis.items(): - # if not physx_scene_api.GetPrim().IsValid(): - # continue - # if physx_scene_api.GetEnableStabilizationAttr().Get() is None: - # physx_scene_api.CreateEnableStabilizationAttr(flag) - # else: - # physx_scene_api.GetEnableStabilizationAttr().Set(flag) - # else: - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # if physx_scene_api.GetEnableStabilizationAttr().Get() is None: - # physx_scene_api.CreateEnableStabilizationAttr(flag) - # else: - # physx_scene_api.GetEnableStabilizationAttr().Set(flag) - - #@classmethod - #def is_stablization_enabled(cls, physics_scene: str = None) -> bool: - # """Checks if stabilization is enabled. - - # Args: - # physics_scene (str, optional): physics scene prim path. - - # Raises: - # Exception: If the prim path registered in context doesn't correspond to a valid prim path currently. - - # Returns: - # bool: True if stabilization is enabled, otherwise False. - # """ - # physx_scene_api = SimulationManager._get_physics_scene_api(physics_scene=physics_scene) - # return physx_scene_api.GetEnableStabilizationAttr().Get() - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing this as the callbacks handling are moved to the SimulationContext class. 
- #@classmethod - #def register_callback(cls, callback: callable, event, order: int = 0, name: str = None): - # """Registers a callback to be triggered when a specific event occurs. - - # Args: - # callback: The callback function to register. - # event: The event to trigger the callback. - # order: The order in which the callback should be triggered. - # name: The name of the callback. - - # Returns: - # int: The ID of the callback. - # """ - # proxy_needed = False - # if hasattr(callback, "__self__"): - # proxy_needed = True - # callback_name = callback.__name__ - # callback_id = SimulationManager._simulation_manager_interface.get_callback_iter() - # if event in [ - # IsaacEvents.PHYSICS_WARMUP, - # IsaacEvents.PHYSICS_READY, - # IsaacEvents.POST_RESET, - # IsaacEvents.SIMULATION_VIEW_CREATED, - # ]: - # if proxy_needed: - # SimulationManager._callbacks[callback_id] = SimulationManager._message_bus.observe_event( - # event_name=event.value, - # order=order, - # on_event=lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event), - # observer_name=f"SimulationManager._callbacks.{event.value}", - # ) - # else: - # SimulationManager._callbacks[callback_id] = SimulationManager._message_bus.observe_event( - # event_name=event.value, - # order=order, - # on_event=callback, - # observer_name=f"SimulationManager._callbacks.{event.value}", - # ) - # elif event == IsaacEvents.PRIM_DELETION: - # if proxy_needed: - # SimulationManager._simulation_manager_interface.register_deletion_callback( - # lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event) - # ) - # else: - # SimulationManager._simulation_manager_interface.register_deletion_callback(callback) - # elif event == IsaacEvents.POST_PHYSICS_STEP: - # if proxy_needed: - # SimulationManager._callbacks[callback_id] = ( - # SimulationManager._physx_interface.subscribe_physics_on_step_events( - # lambda step_dt, obj=weakref.proxy(callback.__self__): ( - # getattr(obj, callback_name)(step_dt) if SimulationManager._simulation_view_created else None - # ), - # pre_step=False, - # order=order, - # ) - # ) - # else: - # SimulationManager._callbacks[callback_id] = ( - # SimulationManager._physx_interface.subscribe_physics_on_step_events( - # lambda step_dt: callback(step_dt) if SimulationManager._simulation_view_created else None, - # pre_step=False, - # order=order, - # ) - # ) - # elif event == IsaacEvents.PRE_PHYSICS_STEP: - # if proxy_needed: - # SimulationManager._callbacks[callback_id] = ( - # SimulationManager._physx_interface.subscribe_physics_on_step_events( - # lambda step_dt, obj=weakref.proxy(callback.__self__): ( - # getattr(obj, callback_name)(step_dt) if SimulationManager._simulation_view_created else None - # ), - # pre_step=True, - # order=order, - # ) - # ) - # else: - # SimulationManager._callbacks[callback_id] = ( - # SimulationManager._physx_interface.subscribe_physics_on_step_events( - # lambda step_dt: callback(step_dt) if SimulationManager._simulation_view_created else None, - # pre_step=True, - # order=order, - # ) - # ) - # elif event == IsaacEvents.TIMELINE_STOP: - # if proxy_needed: - # SimulationManager._callbacks[ - # callback_id - # ] = SimulationManager._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( - # int(omni.timeline.TimelineEventType.STOP), - # lambda event, obj=weakref.proxy(callback.__self__): getattr(obj, callback_name)(event), - # order=order, - # name=name, - # ) - # else: - # SimulationManager._callbacks[ - # callback_id - # ] = 
SimulationManager._timeline.get_timeline_event_stream().create_subscription_to_pop_by_type( - # int(omni.timeline.TimelineEventType.STOP), callback, order=order, name=name - # ) - # else: - # raise Exception("{} event doesn't exist for callback registering".format(event)) - # SimulationManager._simulation_manager_interface.set_callback_iter(callback_id + 1) - # return callback_id - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing this as the callbacks handling are moved to the SimulationContext class. - #@classmethod - #def deregister_callback(cls, callback_id): - # """Deregisters a callback which was previously registered using register_callback. - - # Args: - # callback_id: The ID of the callback returned by register_callback to deregister. - # """ - # if callback_id in SimulationManager._callbacks: - # del SimulationManager._callbacks[callback_id] - # elif SimulationManager._simulation_manager_interface.deregister_callback(callback_id): - # return - # else: - # raise Exception("callback with id {} doesn't exist to be deregistered".format(callback_id)) - - # ------------------------------------------------------------------------------------------------------------------ - - @classmethod - def enable_usd_notice_handler(cls, flag): - """Enables or disables the usd notice handler. - - Args: - flag: Whether to enable or disable the handler. - """ - SimulationManager._simulation_manager_interface.enable_usd_notice_handler(flag) - return - - @classmethod - def enable_fabric_usd_notice_handler(cls, stage_id, flag): - """Enables or disables the fabric usd notice handler. - - Args: - stage_id: The stage ID to enable or disable the handler for. - flag: Whether to enable or disable the handler. - """ - SimulationManager._simulation_manager_interface.enable_fabric_usd_notice_handler(stage_id, flag) - return - - @classmethod - def is_fabric_usd_notice_handler_enabled(cls, stage_id): - """Checks if fabric usd notice handler is enabled. - - Args: - stage_id: The stage ID to check. - - Returns: - bool: True if fabric usd notice handler is enabled, otherwise False. - """ - return SimulationManager._simulation_manager_interface.is_fabric_usd_notice_handler_enabled(stage_id) - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing this as we will not use asynchronous stuff. - #@classmethod - #def assets_loading(cls) -> bool: - # """Checks if textures are loaded. - - # Returns: - # bool: True if textures are loading and not done yet, otherwise False. - # """ - # return not SimulationManager._assets_loaded - - # ------------------------------------------------------------------------------------------------------------------ - - # TODO: Removing these as the callbacks handling are moved to the SimulationContext class. - # Public API methods for enabling/disabling callbacks - #@classmethod - #def enable_warm_start_callback(cls, enable: bool = True) -> None: - # """Enable or disable the warm start callback. - - # Args: - # enable: Whether to enable the callback. - # """ - # cls._callbacks_enabled["warm_start"] = enable - # if enable: - # cls._setup_warm_start_callback() - # else: - # if cls._warm_start_callback is not None: - # cls._warm_start_callback = None - - #@classmethod - #def enable_on_stop_callback(cls, enable: bool = True) -> None: - # """Enable or disable the on stop callback. 
- - # Args: - # enable: Whether to enable the callback. - # """ - # cls._callbacks_enabled["on_stop"] = enable - # if enable: - # cls._setup_on_stop_callback() - # else: - # if cls._on_stop_callback is not None: - # cls._on_stop_callback = None - - #@classmethod - #def enable_post_warm_start_callback(cls, enable: bool = True) -> None: - # """Enable or disable the post warm start callback. - - # Args: - # enable: Whether to enable the callback. - # """ - # cls._callbacks_enabled["post_warm_start"] = enable - # if enable: - # cls._setup_post_warm_start_callback() - # else: - # if cls._post_warm_start_callback is not None: - # cls._post_warm_start_callback = None - - #@classmethod - #def enable_stage_open_callback(cls, enable: bool = True) -> None: - # """Enable or disable the stage open callback. - # Note: This also enables/disables the assets loading and loaded callbacks. If disabled, assets_loading() will always return True. - - # Args: - # enable: Whether to enable the callback. - # """ - # cls._callbacks_enabled["stage_open"] = enable - # if enable: - # cls._setup_stage_open_callback() - # else: - # if cls._stage_open_callback is not None: - # cls._stage_open_callback = None - # # Reset assets loading and loaded callbacks - # cls._assets_loaded = True - # cls._assets_loading_callback = None - # cls._assets_loaded_callback = None - - ## Convenience methods for bulk operations - #@classmethod - #def enable_all_default_callbacks(cls, enable: bool = True) -> None: - # """Enable or disable all default callbacks. - # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. - - # Args: - # enable: Whether to enable all callbacks. - # """ - # cls.enable_warm_start_callback(enable) - # cls.enable_on_stop_callback(enable) - # cls.enable_post_warm_start_callback(enable) - # cls.enable_stage_open_callback(enable) - - #@classmethod - #def is_default_callback_enabled(cls, callback_name: str) -> bool: - # """Check if a specific default callback is enabled. - # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. - - # Args: - # callback_name: Name of the callback to check. - - # Returns: - # Whether the callback is enabled. - # """ - # return cls._callbacks_enabled.get(callback_name, False) - - #@classmethod - #def get_default_callback_status(cls) -> dict: - # """Get the status of all default callbacks. - # Default callbacks are: warm_start, on_stop, post_warm_start, stage_open. - - # Returns: - # Dictionary with callback names and their enabled status. - # """ - # return cls._callbacks_enabled.copy() diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 82f53ffd249..39ed35abf14 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -88,4 +88,4 @@ Deprecated Migration ^^^^^^^^^ -* See :ref:`migrating-to-isaaclab-3-0` for detailed migration instructions. \ No newline at end of file +* See :ref:`migrating-to-isaaclab-3-0` for detailed migration instructions. 
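The hunks that follow move the PhysX-backed asset implementations into the new ``isaaclab_physx`` extension, while the backend-agnostic base classes stay under ``isaaclab.assets``. As a rough orientation, here is a minimal import sketch assembled from the import lines in the hunks below; it is illustrative only, assumes the module paths and class names shown in this patch, and is not an exhaustive map:

.. code-block:: python

    # Backend-agnostic base classes remain in the core isaaclab extension
    # (paths taken from the import statements in the hunks below).
    from isaaclab.assets.articulation.base_articulation import BaseArticulation
    from isaaclab.assets.rigid_object.base_rigid_object import BaseRigidObject
    from isaaclab.assets.rigid_object_collection.base_rigid_object_collection import BaseRigidObjectCollection

    # PhysX-backed implementations live in the new isaaclab_physx extension and subclass the bases,
    # e.g. ``class Articulation(BaseArticulation)`` in the articulation hunk below.
    from isaaclab_physx.assets import Articulation, RigidObject, RigidObjectCollection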
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py index eb529daa1d1..3b31699c1f7 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/__init__.py @@ -56,4 +56,4 @@ "RigidObjectCollectionData", "SurfaceGripper", "SurfaceGripperCfg", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py index 1105cd27520..6efcec972dc 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/__init__.py @@ -11,4 +11,4 @@ __all__ = [ "Articulation", "ArticulationData", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index 22f8b80ba1f..b72cebcd9e9 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -24,11 +24,11 @@ import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator +from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.utils.types import ArticulationActions from isaaclab.utils.version import get_isaac_sim_version from isaaclab.utils.wrench_composer import WrenchComposer -from isaaclab.assets.articulation.base_articulation import BaseArticulation from .articulation_data import ArticulationData if TYPE_CHECKING: @@ -37,6 +37,7 @@ # import logger logger = logging.getLogger(__name__) + class Articulation(BaseArticulation): """An articulation asset class. @@ -263,9 +264,7 @@ def update(self, dt: float): Operations - Finders. """ - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the articulation based on the name keys. Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more @@ -950,7 +949,8 @@ def write_joint_dynamic_friction_coefficient_to_sim( Args: joint_dynamic_friction_coeff: Joint dynamic friction coefficient. Shape is (len(env_ids), len(joint_ids)). joint_ids: The joint indices to set the dynamic friction coefficient for. Defaults to None (all joints). - env_ids: The environment indices to set the dynamic friction coefficient for. Defaults to None (all environments). + env_ids: The environment indices to set the dynamic friction coefficient for. Defaults to None + (all environments). """ if get_isaac_sim_version().major < 5: logger.warning("Setting joint dynamic friction coefficients are not supported in Isaac Sim < 5.0") @@ -983,7 +983,8 @@ def write_joint_viscous_friction_coefficient_to_sim( Args: joint_viscous_friction_coeff: Joint viscous friction coefficient. Shape is (len(env_ids), len(joint_ids)). joint_ids: The joint indices to set the viscous friction coefficient for. Defaults to None (all joints). - env_ids: The environment indices to set the viscous friction coefficient for. Defaults to None (all environments). + env_ids: The environment indices to set the viscous friction coefficient for. 
Defaults to None + (all environments). """ if get_isaac_sim_version().major < 5: logger.warning("Setting joint viscous friction coefficients are not supported in Isaac Sim < 5.0") @@ -1136,9 +1137,9 @@ def set_external_force_and_torque( right before the simulation step. Args: - forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). - torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). - positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3) or (num_instances, num_bodies, 3). Defaults to None. + forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + positions: Positions to apply external wrench. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, @@ -1490,11 +1491,13 @@ def set_spatial_tendon_damping( """Set spatial tendon damping into internal buffers. This function does not apply the tendon damping to the simulation. It only fills the buffers with - the desired values. To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` function. + the desired values. To apply the tendon damping, call the :meth:`write_spatial_tendon_properties_to_sim` + function. Args: - damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or (num_instances, num_spatial_tendons). + damping: Spatial tendon damping. Shape is (len(env_ids), len(spatial_tendon_ids)) or + (num_instances, num_spatial_tendons). spatial_tendon_ids: The tendon indices to set the damping for. Defaults to None (all spatial tendons). env_ids: The environment indices to set the damping for. Defaults to None (all environments). """ @@ -1911,7 +1914,6 @@ def _apply_actuator_model(self): if hasattr(actuator, "gear_ratio"): self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio - """ Internal helpers -- Debugging. """ @@ -1959,7 +1961,8 @@ def _validate_cfg(self): def _log_articulation_info(self): """Log information about the articulation. - .. note:: We purposefully read the values from the simulator to ensure that the values are configured as expected. + .. note:: We purposefully read the values from the simulator to ensure that the values are configured as + expected. """ # define custom formatters for large numbers and limit ranges @@ -2182,4 +2185,4 @@ def root_physx_view(self) -> physx.RigidBodyView: logger.warning( "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." 
) - return self.root_view \ No newline at end of file + return self.root_view diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index 110a55871b9..b202709c9e7 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -11,8 +11,8 @@ import omni.physics.tensors.impl.api as physx from isaacsim.core.simulation_manager import SimulationManager -from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData import isaaclab.utils.math as math_utils +from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData from isaaclab.utils.buffers import TimestampedBuffer # import logger @@ -78,7 +78,7 @@ def is_primed(self) -> bool: @is_primed.setter def is_primed(self, value: bool) -> None: """Set whether the articulation data is fully instantiated and ready to use. - + .. note:: Once this quantity is set to True, it cannot be changed. Args: @@ -91,7 +91,6 @@ def is_primed(self, value: bool) -> None: raise ValueError("The articulation data is already primed.") self._is_primed = True - def update(self, dt: float) -> None: """Updates the data for the articulation. @@ -126,16 +125,16 @@ def update(self, dt: float) -> None: @property def default_root_pose(self) -> torch.Tensor: - """Default root pose ``[pos, quat]`` in the local environment frame. Shape is (num_instances, 7). + """Default root pose ``[pos, quat]`` in the local environment frame. - The position and quaternion are of the articulation root's actor frame. + The position and quaternion are of the articulation root's actor frame. Shape is (num_instances, 7). """ return self._default_root_pose @default_root_pose.setter def default_root_pose(self, value: torch.Tensor) -> None: """Set the default root pose. - + Args: value: The default root pose. Shape is (num_instances, 7). @@ -148,16 +147,17 @@ def default_root_pose(self, value: torch.Tensor) -> None: @property def default_root_vel(self) -> torch.Tensor: - """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 6). + """Default root velocity ``[lin_vel, ang_vel]`` in the local environment frame. The linear and angular velocities are of the articulation root's center of mass frame. + Shape is (num_instances, 6). """ return self._default_root_vel @default_root_vel.setter def default_root_vel(self, value: torch.Tensor) -> None: """Set the default root velocity. - + Args: value: The default root velocity. Shape is (num_instances, 6). @@ -170,27 +170,34 @@ def default_root_vel(self, value: torch.Tensor) -> None: @property def default_root_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. Shape is (num_instances, 13). + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in the local environment frame. + The position and quaternion are of the articulation root's actor frame. Meanwhile, the linear and angular - velocities are of its center of mass frame. + velocities are of its center of mass frame. Shape is (num_instances, 13). This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. """ - logger.warning("Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. 
Please use the default_root_pose and default_root_vel properties instead.") + logger.warning( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_root_pose and default_root_vel properties instead." + ) return torch.cat([self._default_root_pose, self._default_root_vel], dim=1) @default_root_state.setter def default_root_state(self, value: torch.Tensor) -> None: """Set the default root state. - + Args: value: The default root state. Shape is (num_instances, 13). Raises: ValueError: If the articulation data is already primed. """ - logger.warning("Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + logger.warning( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_root_pose and default_root_vel properties instead." + ) if self.is_primed: raise ValueError("The articulation data is already primed.") self._default_root_pose = value[:, :7] @@ -207,7 +214,7 @@ def default_joint_pos(self) -> torch.Tensor: @default_joint_pos.setter def default_joint_pos(self, value: torch.Tensor) -> None: """Set the default joint positions. - + Args: value: The default joint positions. Shape is (num_instances, num_joints). @@ -229,7 +236,7 @@ def default_joint_vel(self) -> torch.Tensor: @default_joint_vel.setter def default_joint_vel(self, value: torch.Tensor) -> None: """Set the default joint velocities. - + Args: value: The default joint velocities. Shape is (num_instances, num_joints). @@ -552,11 +559,11 @@ def root_link_state_w(self) -> torch.Tensor: @property def root_com_state_w(self) -> torch.Tensor: - """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 13). + """Root center of mass state ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. The position, quaternion, and linear/angular velocity are of the articulation root link's center of mass frame relative to the world. Center of mass frame is assumed to be the same orientation as the link rather than the - orientation of the principle inertia. + orientation of the principle inertia. Shape is (num_instances, 13). 
""" if self._root_com_state_w.timestamp < self._sim_timestamp: self._root_com_state_w.data = torch.cat((self.root_com_pose_w, self.root_com_vel_w), dim=-1) @@ -1056,11 +1063,11 @@ def _create_buffers(self) -> None: self._joint_damping = self._root_view.get_dof_dampings().to(self.device).clone() self._joint_armature = self._root_view.get_dof_armatures().to(self.device).clone() friction_props = self._root_view.get_dof_friction_properties() - self._joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() + self._joint_friction_coeff = friction_props[:, :, 0].to(self.device).clone() self._joint_dynamic_friction_coeff = friction_props[:, :, 1].to(self.device).clone() self._joint_viscous_friction_coeff = friction_props[:, :, 2].to(self.device).clone() self._joint_pos_limits = self._root_view.get_dof_limits().to(self.device).clone() - self._joint_vel_limits = self._root_view.get_dof_max_velocities().to(self.device).clone() + self._joint_vel_limits = self._root_view.get_dof_max_velocities().to(self.device).clone() self._joint_effort_limits = self._root_view.get_dof_max_forces().to(self.device).clone() # -- Joint properties (custom) self._soft_joint_pos_limits = torch.zeros((self._root_view.count, num_dofs, 2), device=self.device) @@ -1070,7 +1077,9 @@ def _create_buffers(self) -> None: if num_fixed_tendons > 0: self._fixed_tendon_stiffness = self._root_view.get_fixed_tendon_stiffnesses().to(self.device).clone() self._fixed_tendon_damping = self._root_view.get_fixed_tendon_dampings().to(self.device).clone() - self._fixed_tendon_limit_stiffness = self._root_view.get_fixed_tendon_limit_stiffnesses().to(self.device).clone() + self._fixed_tendon_limit_stiffness = ( + self._root_view.get_fixed_tendon_limit_stiffnesses().to(self.device).clone() + ) self._fixed_tendon_rest_length = self._root_view.get_fixed_tendon_rest_lengths().to(self.device).clone() self._fixed_tendon_offset = self._root_view.get_fixed_tendon_offsets().to(self.device).clone() self._fixed_tendon_pos_limits = self._root_view.get_fixed_tendon_limits().to(self.device).clone() @@ -1085,7 +1094,9 @@ def _create_buffers(self) -> None: if num_spatial_tendons > 0: self._spatial_tendon_stiffness = self._root_view.get_spatial_tendon_stiffnesses().to(self.device).clone() self._spatial_tendon_damping = self._root_view.get_spatial_tendon_dampings().to(self.device).clone() - self._spatial_tendon_limit_stiffness = self._root_view.get_spatial_tendon_limit_stiffnesses().to(self.device).clone() + self._spatial_tendon_limit_stiffness = ( + self._root_view.get_spatial_tendon_limit_stiffnesses().to(self.device).clone() + ) self._spatial_tendon_offset = self._root_view.get_spatial_tendon_offsets().to(self.device).clone() else: self._spatial_tendon_stiffness = None @@ -1378,16 +1389,18 @@ def default_joint_friction_coeff(self) -> torch.Tensor: def default_joint_dynamic_friction_coeff(self) -> torch.Tensor: """Removed: Default joint dynamic friction coefficient is no longer stored.""" raise RuntimeError( - "The property 'default_joint_dynamic_friction_coeff' has been removed. Default values are no longer stored. " - "Please use 'joint_dynamic_friction_coeff' to get the current dynamic friction coefficient values from the simulation." + "The property 'default_joint_dynamic_friction_coeff' has been removed. Default values are no longer " + "stored. Please use 'joint_dynamic_friction_coeff' to get the current dynamic friction coefficient values" + " from the simulation." 
) @property def default_joint_viscous_friction_coeff(self) -> torch.Tensor: """Removed: Default joint viscous friction coefficient is no longer stored.""" raise RuntimeError( - "The property 'default_joint_viscous_friction_coeff' has been removed. Default values are no longer stored. " - "Please use 'joint_viscous_friction_coeff' to get the current viscous friction coefficient values from the simulation." + "The property 'default_joint_viscous_friction_coeff' has been removed. Default values are no longer " + "stored. Please use 'joint_viscous_friction_coeff' to get the current viscous friction coefficient values" + " from the simulation." ) @property @@ -1418,8 +1431,9 @@ def default_fixed_tendon_damping(self) -> torch.Tensor: def default_fixed_tendon_limit_stiffness(self) -> torch.Tensor: """Removed: Default fixed tendon limit stiffness is no longer stored.""" raise RuntimeError( - "The property 'default_fixed_tendon_limit_stiffness' has been removed. Default values are no longer stored. " - "Please use 'fixed_tendon_limit_stiffness' to get the current limit stiffness values from the simulation." + "The property 'default_fixed_tendon_limit_stiffness' has been removed. Default values are no longer " + "stored. Please use 'fixed_tendon_limit_stiffness' to get the current limit stiffness values from the " + "simulation." ) @property @@ -1466,8 +1480,9 @@ def default_spatial_tendon_damping(self) -> torch.Tensor: def default_spatial_tendon_limit_stiffness(self) -> torch.Tensor: """Removed: Default spatial tendon limit stiffness is no longer stored.""" raise RuntimeError( - "The property 'default_spatial_tendon_limit_stiffness' has been removed. Default values are no longer stored. " - "Please use 'spatial_tendon_limit_stiffness' to get the current limit stiffness values from the simulation." + "The property 'default_spatial_tendon_limit_stiffness' has been removed. Default values are no longer " + "stored. Please use 'spatial_tendon_limit_stiffness' to get the current limit stiffness values from the " + "simulation." ) @property @@ -1476,4 +1491,4 @@ def default_spatial_tendon_offset(self) -> torch.Tensor: raise RuntimeError( "The property 'default_spatial_tendon_offset' has been removed. Default values are no longer stored. " "Please use 'spatial_tendon_offset' to get the current offset values from the simulation." 
- ) \ No newline at end of file + ) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py index e7d3faf6f09..34ebd7892ff 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/__init__.py @@ -13,4 +13,4 @@ "DeformableObject", "DeformableObjectCfg", "DeformableObjectData", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 5fe4bdf509c..02cb1281e7f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -17,9 +17,9 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils +from isaaclab.assets.asset_base import AssetBase from isaaclab.markers import VisualizationMarkers -from isaaclab.assets.asset_base import AssetBase from .deformable_object_data import DeformableObjectData if TYPE_CHECKING: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py index 8d90de2549d..d917aef810c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py @@ -5,11 +5,11 @@ from __future__ import annotations +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from isaaclab.markers import VisualizationMarkersCfg from isaaclab.markers.config import DEFORMABLE_TARGET_MARKER_CFG from isaaclab.utils import configclass -from isaaclab.assets.asset_base_cfg import AssetBaseCfg from .deformable_object import DeformableObject diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py index a68a88e67c4..8581c80646c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_data.py @@ -166,10 +166,8 @@ def sim_element_deform_gradient_w(self): """ if self._sim_element_deform_gradient_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp - self._sim_element_deform_gradient_w.data = ( - self._root_view.get_sim_element_deformation_gradients().view( - self._root_view.count, -1, 3, 3 - ) + self._sim_element_deform_gradient_w.data = self._root_view.get_sim_element_deformation_gradients().view( + self._root_view.count, -1, 3, 3 ) self._sim_element_deform_gradient_w.timestamp = self._sim_timestamp return self._sim_element_deform_gradient_w.data @@ -181,8 +179,8 @@ def collision_element_deform_gradient_w(self): """ if self._collision_element_deform_gradient_w.timestamp < self._sim_timestamp: # set the buffer data and timestamp - self._collision_element_deform_gradient_w.data = ( - self._root_view.get_element_deformation_gradients().view(self._root_view.count, -1, 3, 3) + self._collision_element_deform_gradient_w.data = self._root_view.get_element_deformation_gradients().view( + self._root_view.count, -1, 3, 3 ) self._collision_element_deform_gradient_w.timestamp = 
self._sim_timestamp return self._collision_element_deform_gradient_w.data diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py index 3ea9cf62dd6..a6a074e9618 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/__init__.py @@ -11,4 +11,4 @@ __all__ = [ "RigidObject", "RigidObjectData", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 17a1d6ff73b..a344161378e 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -19,8 +19,8 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer from isaaclab.assets.rigid_object.base_rigid_object import BaseRigidObject +from isaaclab.utils.wrench_composer import WrenchComposer from .rigid_object_data import RigidObjectData @@ -187,9 +187,7 @@ def update(self, dt: float) -> None: Operations - Finders. """ - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies in the rigid body based on the name keys. Please check the :meth:`isaaclab.utils.string_utils.resolve_matching_names` function for more @@ -410,7 +408,6 @@ def write_root_link_velocity_to_sim(self, root_velocity: torch.Tensor, env_ids: # write transformed velocity in CoM frame to sim self.write_root_com_velocity_to_sim(root_com_velocity, env_ids=env_ids) - """ Operations - Setters. """ @@ -454,7 +451,8 @@ def set_coms( Args: coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). - env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). + env_ids: The environment indices to set the center of mass positions for. Defaults to None + (all environments). """ # resolve indices physx_env_ids = env_ids @@ -544,7 +542,8 @@ def set_external_force_and_torque( Args: forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, @@ -673,14 +672,8 @@ def _process_cfg(self) -> None: # default state # -- root state # note: we cast to tuple to avoid torch/numpy type mismatch. 
- default_root_pose = ( - tuple(self.cfg.init_state.pos) - + tuple(self.cfg.init_state.rot) - ) - default_root_vel = ( - tuple(self.cfg.init_state.lin_vel) - + tuple(self.cfg.init_state.ang_vel) - ) + default_root_pose = tuple(self.cfg.init_state.pos) + tuple(self.cfg.init_state.rot) + default_root_vel = tuple(self.cfg.init_state.lin_vel) + tuple(self.cfg.init_state.ang_vel) default_root_pose = torch.tensor(default_root_pose, dtype=torch.float, device=self.device) default_root_vel = torch.tensor(default_root_vel, dtype=torch.float, device=self.device) self._data.default_root_pose = default_root_pose.repeat(self.num_instances, 1) @@ -703,4 +696,4 @@ def root_physx_view(self) -> physx.RigidBodyView: logger.warning( "The `root_physx_view` property will be deprecated in a future release. Please use `root_view` instead." ) - return self.root_view \ No newline at end of file + return self.root_view diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index b71e9620faf..46ab004b138 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -3,17 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -import weakref import logging +import weakref import torch import omni.physics.tensors.impl.api as physx import isaaclab.utils.math as math_utils +from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData from isaaclab.sim.utils.stage import get_current_stage_id from isaaclab.utils.buffers import TimestampedBuffer -from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData # import logger logger = logging.getLogger(__name__) @@ -82,7 +82,7 @@ def is_primed(self) -> bool: @is_primed.setter def is_primed(self, value: bool) -> None: """Set whether the rigid object data is fully instantiated and ready to use. - + .. note:: Once this quantity is set to True, it cannot be changed. Args: @@ -126,10 +126,10 @@ def default_root_pose(self) -> torch.Tensor: @default_root_pose.setter def default_root_pose(self, value: torch.Tensor) -> None: """Set the default root pose. - + Args: value: The default root pose. Shape is (num_instances, 7). - + Raises: ValueError: If the rigid object data is already primed. """ @@ -137,7 +137,6 @@ def default_root_pose(self, value: torch.Tensor) -> None: raise ValueError("The rigid object data is already primed.") self._default_root_pose = value - @property def default_root_vel(self) -> torch.Tensor: """Default root velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 6). @@ -149,10 +148,10 @@ def default_root_vel(self) -> torch.Tensor: @default_root_vel.setter def default_root_vel(self, value: torch.Tensor) -> None: """Set the default root velocity. - + Args: value: The default root velocity. Shape is (num_instances, 6). - + Raises: ValueError: If the rigid object data is already primed. """ @@ -162,25 +161,31 @@ def default_root_vel(self, value: torch.Tensor) -> None: @property def default_root_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, 13). + """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. 
+ The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, 13). """ - logger.warning("Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + logger.warning( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_root_pose and default_root_vel properties instead." + ) return torch.cat([self.default_root_pose, self.default_root_vel], dim=1) @default_root_state.setter def default_root_state(self, value: torch.Tensor) -> None: """Set the default root state. - + Args: value: The default root state. Shape is (num_instances, 13). Raises: ValueError: If the rigid object data is already primed. """ - logger.warning("Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + logger.warning( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_root_pose and default_root_vel properties instead." + ) if self._is_primed: raise ValueError("The rigid object data is already primed.") self._default_root_pose = value[:, :7] @@ -839,4 +844,4 @@ def default_inertia(self) -> torch.Tensor: raise RuntimeError( "The property 'default_inertia' has been removed. Default values are no longer stored. " "Please use 'body_inertia' to get the current inertia values from the simulation." - ) \ No newline at end of file + ) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py index 66e5c5aa594..d5969bf393a 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/__init__.py @@ -11,4 +11,4 @@ __all__ = [ "RigidObjectCollection", "RigidObjectCollectionData", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index ad693e1da40..e7f757fec30 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -20,8 +20,8 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils -from isaaclab.utils.wrench_composer import WrenchComposer from isaaclab.assets.rigid_object_collection.base_rigid_object_collection import BaseRigidObjectCollection +from isaaclab.utils.wrench_composer import WrenchComposer from .rigid_object_collection_data import RigidObjectCollectionData @@ -31,6 +31,7 @@ # import logger logger = logging.getLogger(__name__) + class RigidObjectCollection(BaseRigidObjectCollection): """A rigid object collection class. @@ -241,7 +242,8 @@ def write_body_state_to_sim( """Set the bodies state over selected environment indices into the simulation. The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear - and angular velocity. 
All the quantities are in the simulation frame. Shape is (len(env_ids), len(body_ids), 13). + and angular velocity. All the quantities are in the simulation frame. Shape is + (len(env_ids), len(body_ids), 13). Args: body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). @@ -433,7 +435,8 @@ def write_body_com_velocity_to_sim( ..note:: This sets the velocity of the body's center of mass rather than the body's frame. Args: - body_velocities: Body center of mass velocities in simulation frame. Shape is (len(env_ids), len(body_ids), 6). + body_velocities: Body center of mass velocities in simulation frame. Shape is + (len(env_ids), len(body_ids), 6). env_ids: Environment indices. If None, then all indices are used. body_ids: Body indices. If None, then all indices are used. """ @@ -533,7 +536,8 @@ def set_coms( Args: coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). - env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). + env_ids: The environment indices to set the center of mass positions for. Defaults to None + (all environments). """ def set_inertias( @@ -595,7 +599,8 @@ def set_external_force_and_torque( Args: forces: External forces in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). torques: External torques in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). - positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). Defaults to None. + positions: External wrench positions in bodies' local frame. Shape is (len(env_ids), len(body_ids), 3). + Defaults to None. body_ids: Body indices to apply external wrench to. Defaults to None (all bodies). env_ids: Environment indices to apply external wrench to. Defaults to None (all instances). is_global: Whether to apply the external wrench in the global frame. Defaults to False. If set to False, @@ -624,7 +629,7 @@ def set_external_force_and_torque( body_ids = wp.from_torch( torch.arange(self.num_bodies, dtype=torch.int32, device=self.device)[body_ids], dtype=wp.int32 ) - elif not isinstance(object_ids, torch.Tensor): + elif not isinstance(body_ids, torch.Tensor): body_ids = wp.array(body_ids, dtype=wp.int32, device=self.device) else: body_ids = wp.from_torch(body_ids.to(torch.int32), dtype=wp.int32) @@ -766,14 +771,8 @@ def _process_cfg(self) -> None: default_body_poses = [] default_body_vels = [] for rigid_object_cfg in self.cfg.rigid_objects.values(): - default_body_pose = ( - tuple(rigid_object_cfg.init_state.pos) - + tuple(rigid_object_cfg.init_state.rot) - ) - default_body_vel = ( - tuple(rigid_object_cfg.init_state.lin_vel) - + tuple(rigid_object_cfg.init_state.ang_vel) - ) + default_body_pose = tuple(rigid_object_cfg.init_state.pos) + tuple(rigid_object_cfg.init_state.rot) + default_body_vel = tuple(rigid_object_cfg.init_state.lin_vel) + tuple(rigid_object_cfg.init_state.ang_vel) default_body_pose = ( torch.tensor(default_body_pose, dtype=torch.float, device=self.device) .repeat(self.num_instances, 1) @@ -989,7 +988,6 @@ def find_objects( ) -> tuple[torch.Tensor, list[str], list[int]]: """Deprecated method. Please use :meth:`find_bodies` instead.""" logger.warning( - "The `find_objects` method will be deprecated in a future release. Please use" - " `find_bodies` instead." + "The `find_objects` method will be deprecated in a future release. 
Please use `find_bodies` instead." ) - return self.find_bodies(name_keys, preserve_order) \ No newline at end of file + return self.find_bodies(name_keys, preserve_order) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index 9b51d65b72f..394b8d1a3df 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -3,17 +3,17 @@ # # SPDX-License-Identifier: BSD-3-Clause -import weakref import logging +import weakref import torch import omni.physics.tensors.impl.api as physx import isaaclab.utils.math as math_utils +from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData from isaaclab.sim.utils.stage import get_current_stage_id from isaaclab.utils.buffers import TimestampedBuffer -from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData # import logger logger = logging.getLogger(__name__) @@ -88,7 +88,7 @@ def is_primed(self) -> bool: @is_primed.setter def is_primed(self, value: bool) -> None: """Set whether the rigid object collection data is fully instantiated and ready to use. - + .. note:: Once this quantity is set to True, it cannot be changed. Args: @@ -123,19 +123,19 @@ def update(self, dt: float) -> None: @property def default_body_pose(self) -> torch.Tensor: - """Default body pose ``[pos, quat]`` in local environment frame. Shape is (num_instances, num_bodies, 7). + """Default body pose ``[pos, quat]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. + The position and quaternion are of the rigid body's actor frame. Shape is (num_instances, num_bodies, 7). """ return self._default_body_pose @default_body_pose.setter def default_body_pose(self, value: torch.Tensor) -> None: """Set the default body pose. - + Args: value: The default body pose. Shape is (num_instances, num_bodies, 7). - + Raises: ValueError: If the rigid object collection data is already primed. """ @@ -143,22 +143,22 @@ def default_body_pose(self, value: torch.Tensor) -> None: raise ValueError("The rigid object collection data is already primed.") self._default_body_pose = value - @property def default_body_vel(self) -> torch.Tensor: - """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 6). + """Default body velocity ``[lin_vel, ang_vel]`` in local environment frame. - The linear and angular velocities are of the rigid body's center of mass frame. + The linear and angular velocities are of the rigid body's center of mass frame. Shape is + (num_instances, num_bodies, 6). """ return self._default_body_vel @default_body_vel.setter def default_body_vel(self, value: torch.Tensor) -> None: """Set the default body velocity. - + Args: value: The default body velocity. Shape is (num_instances, num_bodies, 6). - + Raises: ValueError: If the rigid object collection data is already primed. """ @@ -168,31 +168,36 @@ def default_body_vel(self, value: torch.Tensor) -> None: @property def default_body_state(self) -> torch.Tensor: - """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. Shape is (num_instances, num_bodies, 13). 
+ """Default root state ``[pos, quat, lin_vel, ang_vel]`` in local environment frame. - The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are - of the center of mass frame. + The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities + are of the center of mass frame. Shape is (num_instances, num_bodies, 13). """ - logger.warning("Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_body_pose and default_body_vel properties instead.") + logger.warning( + "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_body_pose and default_body_vel properties instead." + ) return torch.cat([self.default_body_pose, self.default_body_vel], dim=-1) @default_body_state.setter def default_body_state(self, value: torch.Tensor) -> None: """Set the default body state. - + Args: value: The default body state. Shape is (num_instances, num_bodies, 13). Raises: ValueError: If the rigid object collection data is already primed. """ - logger.warning("Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. Please use the default_root_pose and default_root_vel properties instead.") + logger.warning( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ + Please use the default_root_pose and default_root_vel properties instead." + ) if self._is_primed: raise ValueError("The rigid object collection data is already primed.") self._default_body_pose = value[:, :, :7] self._default_body_vel = value[:, :, 7:] - """ Body state properties. """ @@ -530,6 +535,7 @@ def _create_buffers(self) -> None: # -- Body properties self._body_mass = self._root_view.get_masses().to(self.device).clone() self._body_inertia = self._root_view.get_inertias().to(self.device).clone() + """ Properties for backwards compatibility. """ @@ -551,7 +557,7 @@ def default_object_vel(self) -> torch.Tensor: " `default_body_vel` instead." ) return self.default_body_vel - + @property def default_object_state(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_state` instead.""" @@ -601,8 +607,7 @@ def object_com_vel_w(self): def object_state_w(self): """Deprecated property. Please use :attr:`body_state_w` instead.""" logger.warning( - "The `object_state_w` property will be deprecated in a future release. Please use" - " `body_state_w` instead." + "The `object_state_w` property will be deprecated in a future release. Please use `body_state_w` instead." ) return self.body_state_w @@ -799,8 +804,7 @@ def object_pose_w(self) -> torch.Tensor: def object_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" logger.warning( - "The `object_pos_w` property will be deprecated in a future release. Please use" - " `body_link_pos_w` instead." + "The `object_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." ) return self.body_link_pos_w @@ -817,8 +821,7 @@ def object_quat_w(self) -> torch.Tensor: def object_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" logger.warning( - "The `object_vel_w` property will be deprecated in a future release. Please use" - " `body_com_vel_w` instead." 
+ "The `object_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." ) return self.body_com_vel_w @@ -862,8 +865,7 @@ def object_ang_vel_b(self) -> torch.Tensor: def object_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" logger.warning( - "The `object_acc_w` property will be deprecated in a future release. Please use" - " `body_com_acc_w` instead." + "The `object_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." ) return self.body_com_acc_w @@ -889,8 +891,7 @@ def object_ang_acc_w(self) -> torch.Tensor: def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" logger.warning( - "The `com_pos_b` property will be deprecated in a future release. Please use" - " `body_com_pos_b` instead." + "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." ) return self.body_com_pos_b @@ -898,8 +899,7 @@ def com_pos_b(self) -> torch.Tensor: def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" logger.warning( - "The `com_quat_b` property will be deprecated in a future release. Please use" - " `body_com_quat_b` instead." + "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." ) return self.body_com_quat_b @@ -936,4 +936,4 @@ def _reshape_view_to_data(self, data: torch.Tensor) -> torch.Tensor: Returns: The reshaped data. Shape is (num_bodies, num_instances, data_size). """ - return torch.einsum("ijk -> jik", data.reshape(self.num_bodies, self.num_instances, -1)) \ No newline at end of file + return torch.einsum("ijk -> jik", data.reshape(self.num_bodies, self.num_instances, -1)) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py index 856946e9ecd..1c7771cc5bc 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/__init__.py @@ -11,4 +11,4 @@ __all__ = [ "SurfaceGripper", "SurfaceGripperCfg", -] \ No newline at end of file +] diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py index 8b13b80195b..a6e3bdb2ebb 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper_cfg.py @@ -5,9 +5,9 @@ from dataclasses import MISSING +from isaaclab.assets.asset_base_cfg import AssetBaseCfg from isaaclab.utils import configclass -from isaaclab.assets.asset_base_cfg import AssetBaseCfg from .surface_gripper import SurfaceGripper diff --git a/source/isaaclab_physx/test/assets/test_articulation.py b/source/isaaclab_physx/test/assets/test_articulation.py index f760b7289ce..4d7f462ef2e 100644 --- a/source/isaaclab_physx/test/assets/test_articulation.py +++ b/source/isaaclab_physx/test/assets/test_articulation.py @@ -21,12 +21,12 @@ import pytest import torch +from isaaclab_physx.assets import Articulation import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils import isaaclab.utils.string as string_utils from isaaclab.actuators import ActuatorBase, IdealPDActuatorCfg, ImplicitActuatorCfg -from isaaclab_physx.assets import 
Articulation from isaaclab.assets import ArticulationCfg from isaaclab.envs.mdp.terminations import joint_effort_out_of_limit from isaaclab.managers import SceneEntityCfg diff --git a/source/isaaclab_physx/test/assets/test_deformable_object.py b/source/isaaclab_physx/test/assets/test_deformable_object.py index a17cf58d204..651be1283ae 100644 --- a/source/isaaclab_physx/test/assets/test_deformable_object.py +++ b/source/isaaclab_physx/test/assets/test_deformable_object.py @@ -21,12 +21,12 @@ import pytest import torch from flaky import flaky +from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg import carb import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils -from isaaclab_physx.assets import DeformableObject, DeformableObjectCfg from isaaclab.sim import build_simulation_context diff --git a/source/isaaclab_physx/test/assets/test_rigid_object.py b/source/isaaclab_physx/test/assets/test_rigid_object.py index 257c2ce4919..3a2870bf25a 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object.py @@ -22,9 +22,9 @@ import pytest import torch from flaky import flaky +from isaaclab_physx.assets import RigidObject import isaaclab.sim as sim_utils -from isaaclab_physx.assets import RigidObject from isaaclab.assets import RigidObjectCfg from isaaclab.sim import build_simulation_context from isaaclab.sim.spawners import materials diff --git a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py index 9c85847b3a1..70419426815 100644 --- a/source/isaaclab_physx/test/assets/test_rigid_object_collection.py +++ b/source/isaaclab_physx/test/assets/test_rigid_object_collection.py @@ -20,10 +20,10 @@ import pytest import torch +from isaaclab_physx.assets import RigidObjectCollection import isaaclab.sim as sim_utils -from isaaclab_physx.assets import RigidObjectCollection -from isaaclab.assets import RigidObjectCollectionCfg, RigidObjectCfg +from isaaclab.assets import RigidObjectCfg, RigidObjectCollectionCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from isaaclab.utils.math import ( @@ -668,9 +668,7 @@ def test_set_material_properties(sim, num_envs, num_cubes, device): # Add friction to cube indices = torch.tensor(range(num_cubes * num_envs), dtype=torch.int) - object_collection.root_view.set_material_properties( - object_collection.reshape_data_to_view(materials), indices - ) + object_collection.root_view.set_material_properties(object_collection.reshape_data_to_view(materials), indices) # Perform simulation sim.step() diff --git a/source/isaaclab_physx/test/assets/test_surface_gripper.py b/source/isaaclab_physx/test/assets/test_surface_gripper.py index be4c5ebec9e..14b48bf2735 100644 --- a/source/isaaclab_physx/test/assets/test_surface_gripper.py +++ b/source/isaaclab_physx/test/assets/test_surface_gripper.py @@ -18,6 +18,7 @@ import pytest import torch +from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg import isaaclab.sim as sim_utils from isaaclab.actuators import ImplicitActuatorCfg @@ -27,7 +28,6 @@ RigidObject, RigidObjectCfg, ) -from isaaclab_physx.assets import SurfaceGripper, SurfaceGripperCfg from isaaclab.sim import build_simulation_context from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR from isaaclab.utils.version import get_isaac_sim_version diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py 
b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py index 7617e30988c..7c9e894e3c7 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/forge/forge_env.py @@ -92,9 +92,7 @@ def _compute_intermediate_values(self, dt): self.prev_fingertip_quat = self.noisy_fingertip_quat.clone() # Update and smooth force values. - self.force_sensor_world = self._robot.root_view.get_link_incoming_joint_force()[ - :, self.force_sensor_body_idx - ] + self.force_sensor_world = self._robot.root_view.get_link_incoming_joint_force()[:, self.force_sensor_body_idx] alpha = self.cfg.ft_smoothing_factor self.force_sensor_world_smooth = alpha * self.force_sensor_world + (1 - alpha) * self.force_sensor_world_smooth diff --git a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py index 041f6bb7775..62b03b9a35d 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py +++ b/source/isaaclab_tasks/isaaclab_tasks/direct/inhand_manipulation/inhand_manipulation_env.py @@ -119,9 +119,7 @@ def _apply_action(self) -> None: def _get_observations(self) -> dict: if self.cfg.asymmetric_obs: - self.fingertip_force_sensors = self.hand.root_view.get_link_incoming_joint_force()[ - :, self.finger_bodies - ] + self.fingertip_force_sensors = self.hand.root_view.get_link_incoming_joint_force()[:, self.finger_bodies] if self.cfg.obs_type == "openai": obs = self.compute_reduced_observations() diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py index cac90ed641e..1e92063af3c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/franka/ik_abs_env_cfg.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause from isaaclab_physx.assets import DeformableObjectCfg + from isaaclab.controllers.differential_ik_cfg import DifferentialIKControllerCfg from isaaclab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py index 305dc5125fe..e16bb17f950 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/config/openarm/lift_openarm_env_cfg.py @@ -10,9 +10,10 @@ from dataclasses import MISSING +from isaaclab_physx.assets import DeformableObjectCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab_physx.assets import DeformableObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py index 
485be97d3f8..a065297b0b2 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/lift/lift_env_cfg.py @@ -5,9 +5,10 @@ from dataclasses import MISSING +from isaaclab_physx.assets import DeformableObjectCfg + import isaaclab.sim as sim_utils from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg -from isaaclab_physx.assets import DeformableObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py index ddff39a4f33..be7636c8745 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/galbot/stack_joint_pos_env_cfg.py @@ -4,8 +4,9 @@ # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg from isaaclab_physx.assets import SurfaceGripperCfg + +from isaaclab.assets import RigidObjectCfg from isaaclab.devices import DevicesCfg from isaaclab.devices.device_base import DeviceBase from isaaclab.devices.openxr.openxr_device import OpenXRDeviceCfg diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py index c0b04999388..c80745d8970 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py @@ -3,8 +3,9 @@ # # SPDX-License-Identifier: BSD-3-Clause -from isaaclab.assets import RigidObjectCfg from isaaclab_physx.assets import SurfaceGripperCfg + +from isaaclab.assets import RigidObjectCfg from isaaclab.envs.mdp.actions.actions_cfg import SurfaceGripperBinaryActionCfg from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import SceneEntityCfg From 23d2375e47a1cf7caff5d398c06e7d5ee7ab4720 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 10:30:45 +0100 Subject: [PATCH 10/38] fixing failing doc --- docs/source/api/lab_physx/isaaclab.assets.rst | 5 --- .../migration/migrating_to_isaaclab_3-0.rst | 34 +++++++++---------- 2 files changed, 16 insertions(+), 23 deletions(-) diff --git a/docs/source/api/lab_physx/isaaclab.assets.rst b/docs/source/api/lab_physx/isaaclab.assets.rst index dcd80a0aeb4..df3af9f1662 100644 --- a/docs/source/api/lab_physx/isaaclab.assets.rst +++ b/docs/source/api/lab_physx/isaaclab.assets.rst @@ -7,17 +7,12 @@ .. 
autosummary:: - AssetBase - AssetBaseCfg RigidObject RigidObjectData - RigidObjectCfg RigidObjectCollection RigidObjectCollectionData - RigidObjectCollectionCfg Articulation ArticulationData - ArticulationCfg DeformableObject DeformableObjectData DeformableObjectCfg diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst index 627e78fd5c4..3da20d93359 100644 --- a/docs/source/migration/migrating_to_isaaclab_3-0.rst +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -19,24 +19,22 @@ New ``isaaclab_physx`` Extension A new extension ``isaaclab_physx`` has been introduced to contain PhysX-specific implementations of asset classes. The following classes have been moved to this new extension: -+--------------------------------------------------+--------------------------------------------------+ -| Isaac Lab 2.x | Isaac Lab 3.0 | -+==================================================+==================================================+ -| ``from isaaclab.assets import DeformableObject`` | ``from isaaclab_physx.assets import | -| | DeformableObject`` | -+--------------------------------------------------+--------------------------------------------------+ -| ``from isaaclab.assets import DeformableObjectCfg``| ``from isaaclab_physx.assets import | -| | DeformableObjectCfg`` | -+--------------------------------------------------+--------------------------------------------------+ -| ``from isaaclab.assets import DeformableObjectData``| ``from isaaclab_physx.assets import | -| | DeformableObjectData`` | -+--------------------------------------------------+--------------------------------------------------+ -| ``from isaaclab.assets import SurfaceGripper`` | ``from isaaclab_physx.assets import | -| | SurfaceGripper`` | -+--------------------------------------------------+--------------------------------------------------+ -| ``from isaaclab.assets import SurfaceGripperCfg``| ``from isaaclab_physx.assets import | -| | SurfaceGripperCfg`` | -+--------------------------------------------------+--------------------------------------------------+ +.. list-table:: + :header-rows: 1 + :widths: 50 50 + + * - Isaac Lab 2.x + - Isaac Lab 3.0 + * - ``from isaaclab.assets import DeformableObject`` + - ``from isaaclab_physx.assets import DeformableObject`` + * - ``from isaaclab.assets import DeformableObjectCfg`` + - ``from isaaclab_physx.assets import DeformableObjectCfg`` + * - ``from isaaclab.assets import DeformableObjectData`` + - ``from isaaclab_physx.assets import DeformableObjectData`` + * - ``from isaaclab.assets import SurfaceGripper`` + - ``from isaaclab_physx.assets import SurfaceGripper`` + * - ``from isaaclab.assets import SurfaceGripperCfg`` + - ``from isaaclab_physx.assets import SurfaceGripperCfg`` .. 
note:: From 4b981d2610d670be8eace34ce99efd9b88df5c4b Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 10:48:21 +0100 Subject: [PATCH 11/38] Fixing doc build errors --- docs/source/api/lab_physx/isaaclab.assets.rst | 52 +++++-------------- .../assets/rigid_object/rigid_object_data.py | 1 + .../assets/surface_gripper/surface_gripper.py | 14 ++--- 3 files changed, 21 insertions(+), 46 deletions(-) diff --git a/docs/source/api/lab_physx/isaaclab.assets.rst b/docs/source/api/lab_physx/isaaclab.assets.rst index df3af9f1662..08f4fd6ba3a 100644 --- a/docs/source/api/lab_physx/isaaclab.assets.rst +++ b/docs/source/api/lab_physx/isaaclab.assets.rst @@ -1,5 +1,5 @@ isaaclab_physx.assets -=============== +===================== .. automodule:: isaaclab_physx.assets @@ -7,12 +7,12 @@ .. autosummary:: + Articulation + ArticulationData RigidObject RigidObjectData RigidObjectCollection RigidObjectCollectionData - Articulation - ArticulationData DeformableObject DeformableObjectData DeformableObjectCfg @@ -21,15 +21,19 @@ .. currentmodule:: isaaclab_physx.assets -Asset Base ----------- +Articulation +------------ -.. autoclass:: AssetBase +.. autoclass:: Articulation :members: + :inherited-members: + :show-inheritance: -.. autoclass:: AssetBaseCfg +.. autoclass:: ArticulationData :members: - :exclude-members: __init__, class_type, InitialStateCfg + :inherited-members: + :show-inheritance: + :exclude-members: __init__ Rigid Object ------------ @@ -45,12 +49,6 @@ Rigid Object :show-inheritance: :exclude-members: __init__ -.. autoclass:: RigidObjectCfg - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__, class_type - Rigid Object Collection ----------------------- @@ -65,32 +63,6 @@ Rigid Object Collection :show-inheritance: :exclude-members: __init__ -.. autoclass:: RigidObjectCollectionCfg - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__, class_type - -Articulation ------------- - -.. autoclass:: Articulation - :members: - :inherited-members: - :show-inheritance: - -.. autoclass:: ArticulationData - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__ - -.. autoclass:: ArticulationCfg - :members: - :inherited-members: - :show-inheritance: - :exclude-members: __init__, class_type - Deformable Object ----------------- diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index 46ab004b138..fc921482113 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -31,6 +31,7 @@ class RigidObjectData(BaseRigidObjectData): - Actor frame: The frame of reference of the rigid body prim. This typically corresponds to the Xform prim with the rigid body schema. - Center of mass frame: The frame of reference of the center of mass of the rigid body. + Depending on the settings of the simulation, the actor frame and the center of mass frame may be the same. This needs to be taken into account when interpreting the data. 
diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index 2742e9baeb4..fcf02950285 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -165,14 +165,16 @@ def update(self, dt: float) -> None: This function is called every simulation step. The data fetched from the gripper view is a list of strings containing 3 possible states: - - "Open" --> 0 - - "Closing" --> 1 - - "Closed" --> 2 + + - "Open" --> 0 + - "Closing" --> 1 + - "Closed" --> 2 To make this more neural network friendly, we convert the list of strings to a list of floats: - - "Open" --> -1.0 - - "Closing" --> 0.0 - - "Closed" --> 1.0 + + - "Open" --> -1.0 + - "Closing" --> 0.0 + - "Closed" --> 1.0 Note: We need to do this conversion for every single step of the simulation because the gripper can lose contact From 252633e46863044711d252a49e5f0f37a10ec424 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 11:12:40 +0100 Subject: [PATCH 12/38] Fix multi-rotor + wrench composer tests. --- source/isaaclab/test/utils/test_wrench_composer.py | 10 +++++----- .../isaaclab_contrib/assets/multirotor/multirotor.py | 2 +- .../assets/multirotor/multirotor_data.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/source/isaaclab/test/utils/test_wrench_composer.py b/source/isaaclab/test/utils/test_wrench_composer.py index 3cc88b3b902..84a714c2e49 100644 --- a/source/isaaclab/test/utils/test_wrench_composer.py +++ b/source/isaaclab/test/utils/test_wrench_composer.py @@ -13,7 +13,7 @@ import torch import warp as wp -from isaaclab.assets import RigidObject +from isaaclab.assets import BaseRigidObject from isaaclab.utils.wrench_composer import WrenchComposer @@ -55,7 +55,7 @@ class MockRigidObject: """Mock RigidObject that provides the minimal interface required by WrenchComposer. This mock enables testing WrenchComposer in isolation without requiring a full simulation setup. - It passes isinstance checks by registering as a virtual subclass of RigidObject. + It passes isinstance checks by registering as a virtual subclass of BaseRigidObject. 
""" def __init__( @@ -126,9 +126,9 @@ def random_unit_quaternion_np(rng: np.random.Generator, shape: tuple) -> np.ndar return q -# Register MockRigidObject as a virtual subclass of RigidObject -# This allows isinstance(mock, RigidObject) to return True -RigidObject.register(MockRigidObject) +# Register MockRigidObject as a virtual subclass of BaseRigidObject +# This allows isinstance(mock, BaseRigidObject) to return True +BaseRigidObject.register(MockRigidObject) @pytest.mark.parametrize("device", ["cuda:0", "cpu"]) diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py index 0949c77818a..5af55984dd9 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py @@ -15,7 +15,7 @@ import torch import isaaclab.utils.string as string_utils -from isaaclab.assets.articulation import Articulation +from isaaclab_physx.assets.articulation import Articulation from isaaclab_contrib.actuators import Thruster from isaaclab_contrib.utils.types import MultiRotorActions diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py index 05ea56c4565..e6403e6d1e2 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py @@ -5,7 +5,7 @@ import torch -from isaaclab.assets.articulation.articulation_data import ArticulationData +from isaaclab_physx.assets.articulation.articulation_data import ArticulationData class MultirotorData(ArticulationData): From 64c758e2814072a6dafa6abb9265524af985951b Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 11:20:53 +0100 Subject: [PATCH 13/38] Fixing issues here and here. --- source/isaaclab/isaaclab/envs/mdp/events.py | 15 +++++++++------ source/isaaclab_contrib/docs/CHANGELOG.rst | 9 +++++++++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index b8af797833f..e0cf09b3155 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -30,7 +30,7 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.actuators import ImplicitActuator -from isaaclab.assets import Articulation, RigidObject +from isaaclab.assets import Articulation, RigidObject, BaseArticulation, BaseRigidObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.sim.utils.stage import get_current_stage from isaaclab.terrains import TerrainImporter @@ -197,7 +197,7 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] self.asset: RigidObject | Articulation = env.scene[self.asset_cfg.name] - if not isinstance(self.asset, (RigidObject, Articulation)): + if not isinstance(self.asset, (BaseRigidObject, BaseArticulation)): raise ValueError( f"Randomization term 'randomize_rigid_body_material' not supported for asset: '{self.asset_cfg.name}'" f" with type: '{type(self.asset)}'." @@ -337,6 +337,9 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): " physics errors." 
) + self.default_mass = self.asset.data.default_mass + self.default_inertia = self.asset.data.default_inertia + def __call__( self, env: ManagerBasedEnv, @@ -366,7 +369,7 @@ def __call__( # apply randomization on default values # this is to make sure when calling the function multiple times, the randomization is applied on the # default values and not the previously randomized values - masses[env_ids[:, None], body_ids] = self.asset.data.default_mass[env_ids[:, None], body_ids].clone() + masses[env_ids[:, None], body_ids] = self.default_mass[env_ids[:, None], body_ids].clone() # sample from the given range # note: we modify the masses in-place for all environments @@ -382,18 +385,18 @@ def __call__( # recompute inertia tensors if needed if recompute_inertia: # compute the ratios of the new masses to the initial masses - ratios = masses[env_ids[:, None], body_ids] / self.asset.data.default_mass[env_ids[:, None], body_ids] + ratios = masses[env_ids[:, None], body_ids] / self.default_mass[env_ids[:, None], body_ids] # scale the inertia tensors by the the ratios # since mass randomization is done on default values, we can use the default inertia tensors inertias = self.asset.root_view.get_inertias() if isinstance(self.asset, Articulation): # inertia has shape: (num_envs, num_bodies, 9) for articulation inertias[env_ids[:, None], body_ids] = ( - self.asset.data.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] + self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] ) else: # inertia has shape: (num_envs, 9) for rigid object - inertias[env_ids] = self.asset.data.default_inertia[env_ids] * ratios + inertias[env_ids] = self.default_inertia[env_ids] * ratios # set the inertia tensors into the physics simulation self.asset.root_view.set_inertias(inertias, env_ids) diff --git a/source/isaaclab_contrib/docs/CHANGELOG.rst b/source/isaaclab_contrib/docs/CHANGELOG.rst index cd515121e65..39662f766c0 100644 --- a/source/isaaclab_contrib/docs/CHANGELOG.rst +++ b/source/isaaclab_contrib/docs/CHANGELOG.rst @@ -1,6 +1,15 @@ Changelog --------- +0.0.2 (2026-01-28) +~~~~~~~~~~~~~~~~~~ + +Updated +^^^^^^^ + +* Updated the multirotor asset to use the new base classes from the isaaclab_physx package. + + 0.0.1 (2025-12-17) ~~~~~~~~~~~~~~~~~~ From e9ae031db23821c41d3633bacea525e37971f05e Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 11:50:58 +0100 Subject: [PATCH 14/38] Fixing more issues. --- source/isaaclab/isaaclab/envs/mdp/events.py | 50 +++++++++++-------- .../assets/articulation/articulation_data.py | 4 +- .../assets/rigid_object/rigid_object_data.py | 4 +- .../rigid_object_collection_data.py | 4 +- 4 files changed, 35 insertions(+), 27 deletions(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index e0cf09b3155..495387b9812 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -337,8 +337,9 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): " physics errors." 
) - self.default_mass = self.asset.data.default_mass - self.default_inertia = self.asset.data.default_inertia + self.default_mass = None + self.default_inertia = None + def __call__( self, @@ -351,20 +352,25 @@ def __call__( recompute_inertia: bool = True, min_mass: float = 1e-6, ): + if self.default_mass is None: + self.default_mass = self.asset.data.body_mass + if self.default_inertia is None: + self.default_inertia = self.asset.data.body_inertia + # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + env_ids = torch.arange(env.scene.num_envs, device=self.asset.device) else: - env_ids = env_ids.cpu() + env_ids = env_ids.to(self.asset.device) # resolve body indices if self.asset_cfg.body_ids == slice(None): - body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int, device="cpu") + body_ids = torch.arange(self.asset.num_bodies, dtype=torch.int, device=self.asset.device) else: - body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int, device="cpu") + body_ids = torch.tensor(self.asset_cfg.body_ids, dtype=torch.int, device=self.asset.device) # get the current masses of the bodies (num_assets, num_bodies) - masses = self.asset.root_view.get_masses() + masses = self.asset.data.body_mass.clone() # apply randomization on default values # this is to make sure when calling the function multiple times, the randomization is applied on the @@ -380,7 +386,7 @@ def __call__( masses = torch.clamp(masses, min=min_mass) # ensure masses are positive # set the mass into the physics simulation - self.asset.root_view.set_masses(masses, env_ids) + self.asset.set_masses(masses, None, env_ids) # recompute inertia tensors if needed if recompute_inertia: @@ -388,8 +394,10 @@ def __call__( ratios = masses[env_ids[:, None], body_ids] / self.default_mass[env_ids[:, None], body_ids] # scale the inertia tensors by the the ratios # since mass randomization is done on default values, we can use the default inertia tensors - inertias = self.asset.root_view.get_inertias() - if isinstance(self.asset, Articulation): + inertias = self.asset.data.body_inertia.clone() + print("inertias device: ", inertias.device) + print("inertias shape: ", inertias.shape) + if isinstance(self.asset, BaseArticulation): # inertia has shape: (num_envs, num_bodies, 9) for articulation inertias[env_ids[:, None], body_ids] = ( self.default_inertia[env_ids[:, None], body_ids] * ratios[..., None] @@ -398,7 +406,7 @@ def __call__( # inertia has shape: (num_envs, 9) for rigid object inertias[env_ids] = self.default_inertia[env_ids] * ratios # set the inertia tensors into the physics simulation - self.asset.root_view.set_inertias(inertias, env_ids) + self.asset.set_inertias(inertias, None, env_ids) def randomize_rigid_body_com( @@ -410,36 +418,36 @@ def randomize_rigid_body_com( """Randomize the center of mass (CoM) of rigid bodies by adding a random value sampled from the given ranges. .. note:: - This function uses CPU tensors to assign the CoM. It is recommended to use this function - only during the initialization of the environment. + This function does not properly track the original CoM values. It is recommended to use this function + only once, during the initialization of the environment. 
""" # extract the used quantities (to enable type-hinting) asset: Articulation = env.scene[asset_cfg.name] # resolve environment ids if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device="cpu") + env_ids = torch.arange(env.scene.num_envs, device=asset.device) else: - env_ids = env_ids.cpu() + env_ids = env_ids.to(asset.device) # resolve body indices if asset_cfg.body_ids == slice(None): - body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device="cpu") + body_ids = torch.arange(asset.num_bodies, dtype=torch.int, device=asset.device) else: - body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device="cpu") + body_ids = torch.tensor(asset_cfg.body_ids, dtype=torch.int, device=asset.device) # sample random CoM values range_list = [com_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] - ranges = torch.tensor(range_list, device="cpu") - rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device="cpu").unsqueeze(1) + ranges = torch.tensor(range_list, device=asset.device) + rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device).unsqueeze(1) # get the current com of the bodies (num_assets, num_bodies) - coms = asset.root_view.get_coms().clone() + coms = asset.data.body_com_pose_b.clone() # Randomize the com in range coms[env_ids[:, None], body_ids, :3] += rand_samples # Set the new coms - asset.root_view.set_coms(coms, env_ids) + asset.set_coms(coms, None, env_ids) def randomize_rigid_body_collider_offsets( diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index b202709c9e7..ff76c2adacc 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -578,12 +578,12 @@ def root_com_state_w(self) -> torch.Tensor: @property def body_mass(self) -> torch.Tensor: """Body mass ``wp.float32`` in the world frame. Shape is (num_instances, num_bodies).""" - return self._body_mass + return self._body_mass.to(self.device) @property def body_inertia(self) -> torch.Tensor: """Body inertia ``wp.mat33`` in the world frame. Shape is (num_instances, num_bodies, 3, 3).""" - return self._body_inertia + return self._body_inertia.to(self.device) @property def body_link_pose_w(self) -> torch.Tensor: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index fc921482113..f5b104e5ae0 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -311,12 +311,12 @@ def root_com_state_w(self) -> torch.Tensor: @property def body_mass(self) -> torch.Tensor: """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" - return self._body_mass + return self._body_mass.to(self.device) @property def body_inertia(self) -> torch.Tensor: """Inertia of all bodies in the simulation world frame. 
Shape is (num_instances, 1, 3, 3).""" - return self._body_inertia + return self._body_inertia.to(self.device) @property def body_link_pose_w(self) -> torch.Tensor: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index 394b8d1a3df..ffc8e8beb07 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -347,12 +347,12 @@ def body_com_pose_b(self) -> torch.Tensor: @property def body_mass(self) -> torch.Tensor: """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" - return self._reshape_view_to_data(self._body_mass) + return self._reshape_view_to_data(self._body_mass.to(self.device)) @property def body_inertia(self) -> torch.Tensor: """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" - return self._reshape_view_to_data(self._body_inertia) + return self._reshape_view_to_data(self._body_inertia.to(self.device)) """ Derived Properties. From 03fd8019ec7f33e9b8e7a5b2815e7fd497d0e9c5 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 11:51:23 +0100 Subject: [PATCH 15/38] pre-commits --- source/isaaclab/isaaclab/envs/mdp/events.py | 7 ++++--- .../isaaclab_contrib/assets/multirotor/multirotor.py | 2 +- .../isaaclab_contrib/assets/multirotor/multirotor_data.py | 1 - 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 495387b9812..bdfc1d03614 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -30,7 +30,7 @@ import isaaclab.sim as sim_utils import isaaclab.utils.math as math_utils from isaaclab.actuators import ImplicitActuator -from isaaclab.assets import Articulation, RigidObject, BaseArticulation, BaseRigidObject +from isaaclab.assets import Articulation, BaseArticulation, BaseRigidObject, RigidObject from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.sim.utils.stage import get_current_stage from isaaclab.terrains import TerrainImporter @@ -339,7 +339,6 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): self.default_mass = None self.default_inertia = None - def __call__( self, @@ -438,7 +437,9 @@ def randomize_rigid_body_com( # sample random CoM values range_list = [com_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z"]] ranges = torch.tensor(range_list, device=asset.device) - rand_samples = math_utils.sample_uniform(ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device).unsqueeze(1) + rand_samples = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (len(env_ids), 3), device=asset.device + ).unsqueeze(1) # get the current com of the bodies (num_assets, num_bodies) coms = asset.data.body_com_pose_b.clone() diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py index 5af55984dd9..339a48a387e 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor.py @@ -13,9 +13,9 @@ from typing import TYPE_CHECKING import torch +from 
isaaclab_physx.assets.articulation import Articulation import isaaclab.utils.string as string_utils -from isaaclab_physx.assets.articulation import Articulation from isaaclab_contrib.actuators import Thruster from isaaclab_contrib.utils.types import MultiRotorActions diff --git a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py index e6403e6d1e2..b1eaf25e4f3 100644 --- a/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py +++ b/source/isaaclab_contrib/isaaclab_contrib/assets/multirotor/multirotor_data.py @@ -4,7 +4,6 @@ # SPDX-License-Identifier: BSD-3-Clause import torch - from isaaclab_physx.assets.articulation.articulation_data import ArticulationData From 2bc7c41e143b683befd9da341c7c15918bad6d0d Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 13:30:47 +0100 Subject: [PATCH 16/38] Fixing failing doc build. --- docs/source/api/lab_physx/isaaclab.assets.rst | 5 +++-- .../isaaclab/assets/articulation/base_articulation.py | 8 ++++---- .../base_rigid_object_collection.py | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/docs/source/api/lab_physx/isaaclab.assets.rst b/docs/source/api/lab_physx/isaaclab.assets.rst index 08f4fd6ba3a..b631dcbcb7b 100644 --- a/docs/source/api/lab_physx/isaaclab.assets.rst +++ b/docs/source/api/lab_physx/isaaclab.assets.rst @@ -2,6 +2,7 @@ ===================== .. automodule:: isaaclab_physx.assets + :noindex: .. rubric:: Classes @@ -81,7 +82,7 @@ Deformable Object :members: :inherited-members: :show-inheritance: - :exclude-members: __init__, class_type + :exclude-members: __init__, class_type, InitialStateCfg Surface Gripper --------------- @@ -95,4 +96,4 @@ Surface Gripper :members: :inherited-members: :show-inheritance: - :exclude-members: __init__, class_type + :exclude-members: __init__, class_type, InitialStateCfg diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index 2e8f941dfc1..a614705ed3d 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -886,11 +886,11 @@ def set_fixed_tendon_position_limit( This function does not apply the tendon limit to the simulation. It only fills the buffers with the desired values. To apply the tendon limit, call the :meth:`write_fixed_tendon_properties_to_sim` function. - Args: - limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or + Args: + limit: Fixed tendon limit. Shape is (len(env_ids), len(fixed_tendon_ids)) or (num_instances, num_fixed_tendons). - fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). - env_ids: The environment indices to set the limit for. Defaults to None (all environments). + fixed_tendon_ids: The tendon indices to set the limit for. Defaults to None (all fixed tendons). + env_ids: The environment indices to set the limit for. Defaults to None (all environments). 
""" raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index 13b1c263e84..42f504070e8 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -174,7 +174,7 @@ def write_body_state_to_sim( The body state comprises of the cartesian position, quaternion orientation in (x, y, z, w), and linear and angular velocity. All the quantities are in the simulation frame. Shape is - (len(env_ids), len(body_ids), 13). + ``(len(env_ids), len(body_ids), 13)``. Args: body_states: Body states in simulation frame. Shape is (len(env_ids), len(body_ids), 13). From ac8c2e5fea14c727da0c21bf461b0ea49863b9cd Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Wed, 28 Jan 2026 14:27:19 +0100 Subject: [PATCH 17/38] Addressing last doc bits --- docs/index.rst | 1 + docs/source/api/index.rst | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/docs/index.rst b/docs/index.rst index 97b5f851e08..56177298e60 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -147,6 +147,7 @@ Table of Contents :caption: Migration Guides :titlesonly: + source/migration/migrating_to_isaaclab_3-0 source/migration/migrating_from_isaacgymenvs source/migration/migrating_from_omniisaacgymenvs source/migration/migrating_from_orbit diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst index be6e7230fad..272e33e7398 100644 --- a/docs/source/api/index.rst +++ b/docs/source/api/index.rst @@ -106,3 +106,8 @@ The following modules are available in the ``isaaclab_physx`` extension: :toctree: lab_physx assets + +.. toctree:: + :hidden: + + lab_physx/isaaclab.assets From da066572ccef4ed8cb1a22e41289a474b19e9c76 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 09:43:14 +0100 Subject: [PATCH 18/38] should fix some of the failing mimic tests. --- .../stack/config/franka/bin_stack_joint_pos_env_cfg.py | 1 + .../manipulation/stack/config/franka/stack_joint_pos_env_cfg.py | 1 + .../config/franka/stack_joint_pos_instance_randomize_env_cfg.py | 1 + .../stack/config/ur10_gripper/stack_joint_pos_env_cfg.py | 1 + .../manager_based/manipulation/stack/mdp/franka_stack_events.py | 2 +- 5 files changed, 5 insertions(+), 1 deletion(-) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py index 4121c1bb2e2..8238dd00ef6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/bin_stack_joint_pos_env_cfg.py @@ -30,6 +30,7 @@ class EventCfg: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. 
init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, # mode="startup", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py index e344f0021db..f9b4d498371 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_env_cfg.py @@ -28,6 +28,7 @@ class EventCfg: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, mode="reset", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py index 3e14199a263..ad6ef5c1bd0 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/franka/stack_joint_pos_instance_randomize_env_cfg.py @@ -32,6 +32,7 @@ class EventCfg: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, mode="startup", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py index c80745d8970..88e59a1617c 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/config/ur10_gripper/stack_joint_pos_env_cfg.py @@ -35,6 +35,7 @@ class EventCfgLongSuction: """Configuration for events.""" + # FIXME: Let's not do that and initialize the arm pose correctly in the environment constructor instead. 
init_franka_arm_pose = EventTerm( func=franka_stack_events.set_default_joint_pose, mode="reset", diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py index 3d9e1db4862..92cd7b955d6 100644 --- a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/stack/mdp/franka_stack_events.py @@ -30,7 +30,7 @@ def set_default_joint_pose( ): # Set the default pose for robots in all envs asset = env.scene[asset_cfg.name] - asset.data.default_joint_pos = torch.tensor(default_pose, device=env.device).repeat(env.num_envs, 1) + asset.data.default_joint_pos[:] = torch.tensor(default_pose, device=env.device).repeat(env.num_envs, 1) def randomize_joint_by_gaussian_offset( From 69be8a47fce00dd8a3b8d92af3c151c25c89cfe6 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 09:42:30 +0100 Subject: [PATCH 19/38] Initial commit --- .../migration/migrating_to_isaaclab_3-0.rst | 19 + source/isaaclab/docs/CHANGELOG.rst | 31 + .../sensors/contact_sensor/__init__.py | 2 + .../contact_sensor/base_contact_sensor.py | 192 ++++++ .../base_contact_sensor_data.py | 122 ++++ .../sensors/contact_sensor/contact_sensor.py | 534 +---------------- .../contact_sensor/contact_sensor_data.py | 150 +---- .../sensors/frame_transformer/__init__.py | 2 + .../base_frame_transformer.py | 124 ++++ .../base_frame_transformer_data.py | 65 ++ .../frame_transformer/frame_transformer.py | 555 +----------------- .../frame_transformer_data.py | 54 +- .../isaaclab/isaaclab/sensors/imu/__init__.py | 2 + .../isaaclab/isaaclab/sensors/imu/base_imu.py | 86 +++ .../isaaclab/sensors/imu/base_imu_data.py | 62 ++ source/isaaclab/isaaclab/sensors/imu/imu.py | 294 +--------- .../isaaclab/isaaclab/sensors/imu/imu_data.py | 54 +- source/isaaclab_physx/docs/CHANGELOG.rst | 32 + .../isaaclab_physx/isaaclab_physx/__init__.py | 3 + .../isaaclab_physx/sensors/__init__.py | 19 + .../sensors/contact_sensor/__init__.py | 9 + .../sensors/contact_sensor/contact_sensor.py | 423 +++++++++++++ .../contact_sensor/contact_sensor_data.py | 181 ++++++ .../sensors/frame_transformer/__init__.py | 9 + .../frame_transformer/frame_transformer.py | 549 +++++++++++++++++ .../frame_transformer_data.py | 72 +++ .../isaaclab_physx/sensors/imu/__init__.py | 9 + .../isaaclab_physx/sensors/imu/imu.py | 297 ++++++++++ .../isaaclab_physx/sensors/imu/imu_data.py | 65 ++ .../test/sensors/check_contact_sensor.py | 0 .../test/sensors/check_imu_sensor.py | 0 .../test/sensors/test_contact_sensor.py | 0 .../test/sensors/test_frame_transformer.py | 0 .../test/sensors/test_imu.py | 0 .../test/sensors/urdfs/simple_2_link.urdf | 0 35 files changed, 2423 insertions(+), 1593 deletions(-) create mode 100644 source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py create mode 100644 source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py create mode 100644 source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py create mode 100644 source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py create mode 100644 source/isaaclab/isaaclab/sensors/imu/base_imu.py create mode 100644 source/isaaclab/isaaclab/sensors/imu/base_imu_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/__init__.py create mode 100644 
source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py create mode 100644 source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py rename source/{isaaclab => isaaclab_physx}/test/sensors/check_contact_sensor.py (100%) rename source/{isaaclab => isaaclab_physx}/test/sensors/check_imu_sensor.py (100%) rename source/{isaaclab => isaaclab_physx}/test/sensors/test_contact_sensor.py (100%) rename source/{isaaclab => isaaclab_physx}/test/sensors/test_frame_transformer.py (100%) rename source/{isaaclab => isaaclab_physx}/test/sensors/test_imu.py (100%) rename source/{isaaclab => isaaclab_physx}/test/sensors/urdfs/simple_2_link.urdf (100%) diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst index 3da20d93359..1024d9a6d3f 100644 --- a/docs/source/migration/migrating_to_isaaclab_3-0.rst +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -53,6 +53,25 @@ The following asset classes remain in the ``isaaclab`` package and can still be These classes now inherit from new abstract base classes but maintain full backward compatibility. +The following sensor classes also remain in the ``isaaclab`` package with unchanged imports: + +- :class:`~isaaclab.sensors.ContactSensor`, :class:`~isaaclab.sensors.ContactSensorCfg`, :class:`~isaaclab.sensors.ContactSensorData` +- :class:`~isaaclab.sensors.Imu`, :class:`~isaaclab.sensors.ImuCfg`, :class:`~isaaclab.sensors.ImuData` +- :class:`~isaaclab.sensors.FrameTransformer`, :class:`~isaaclab.sensors.FrameTransformerCfg`, :class:`~isaaclab.sensors.FrameTransformerData` + +These sensor classes now use factory patterns that automatically instantiate the appropriate backend +implementation (PhysX by default), maintaining full backward compatibility. + +If you need to import the PhysX sensor implementations directly (e.g., for type hints or subclassing), +you can import from ``isaaclab_physx.sensors``: + +.. code-block:: python + + # Direct PhysX implementation imports + from isaaclab_physx.sensors import ContactSensor, ContactSensorData + from isaaclab_physx.sensors import Imu, ImuData + from isaaclab_physx.sensors import FrameTransformer, FrameTransformerData + ``root_physx_view`` Deprecation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 4abb2a6ac4f..ba621e1c45e 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,37 @@ Changelog --------- +0.54.4 (2026-01-29) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :class:`~isaaclab.sensors.contact_sensor.BaseContactSensor` and + :class:`~isaaclab.sensors.contact_sensor.BaseContactSensorData` abstract base classes that define + the interface for contact sensors. These classes provide a backend-agnostic API for contact sensing. 
+* Added :class:`~isaaclab.sensors.imu.BaseImu` and :class:`~isaaclab.sensors.imu.BaseImuData` abstract + base classes that define the interface for IMU sensors. These classes provide a backend-agnostic + API for inertial measurement. +* Added :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformer` and + :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformerData` abstract base classes that + define the interface for frame transformer sensors. These classes provide a backend-agnostic API + for coordinate frame transformations. + +Changed +^^^^^^^ + +* Refactored the sensor classes (:class:`~isaaclab.sensors.ContactSensor`, + :class:`~isaaclab.sensors.Imu`, :class:`~isaaclab.sensors.FrameTransformer`) to follow the + multi-backend architecture. The classes now act as factory wrappers that instantiate the + appropriate backend-specific implementation (PhysX by default). +* Moved PhysX-specific sensor tests to the ``isaaclab_physx`` package: + + * ``test_contact_sensor.py`` → ``isaaclab_physx/test/sensors/`` + * ``test_imu.py`` → ``isaaclab_physx/test/sensors/`` + * ``test_frame_transformer.py`` → ``isaaclab_physx/test/sensors/`` + + 0.54.3 (2026-01-29) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py index 94b402d41a3..b43f57f2daa 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py @@ -5,6 +5,8 @@ """Sub-module for rigid contact sensor.""" +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData from .contact_sensor import ContactSensor from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py new file mode 100644 index 00000000000..d32631bba5e --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py @@ -0,0 +1,192 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_contact_sensor_data import BaseContactSensorData + +if TYPE_CHECKING: + from .contact_sensor_cfg import ContactSensorCfg + + +class BaseContactSensor(SensorBase): + """A contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the physics engine's contact reporting API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. 
This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseContactSensorData: + raise NotImplementedError + + @property + @abstractmethod + def num_bodies(self) -> int: + """Number of bodies with contact sensors attached.""" + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + raise NotImplementedError + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: + """Checks if bodies that have established contact within the last :attr:`dt` seconds. + + This function checks if the bodies have established contact within the last :attr:`dt` seconds + by comparing the current contact time with the given time period. If the contact time is less + than the given time period, then the bodies are considered to be in contact. + + Note: + The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other + words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true + if the sensor is updated by the physics or the environment stepping time-step and the sensor + is read by the environment stepping time-step. + + Args: + dt: The time period since the contact was established. + abs_tol: The absolute tolerance for the comparison. + + Returns: + A boolean tensor indicating the bodies that have established contact within the last + :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the + number of bodies in each sensor. 
+
+        Raises:
+            RuntimeError: If the sensor is not configured to track contact time.
+        """
+        # check if the sensor is configured to track contact time
+        if not self.cfg.track_air_time:
+            raise RuntimeError(
+                "The contact sensor is not configured to track contact time."
+                "Please enable the 'track_air_time' in the sensor configuration."
+            )
+        # check if the bodies are in contact
+        currently_in_contact = self.data.current_contact_time > 0.0
+        less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol)
+        return currently_in_contact * less_than_dt_in_contact
+
+    def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor:
+        """Checks if bodies that have broken contact within the last :attr:`dt` seconds.
+
+        This function checks if the bodies have broken contact within the last :attr:`dt` seconds
+        by comparing the current air time with the given time period. If the air time is less
+        than the given time period, then the bodies are considered to not be in contact.
+
+        Note:
+            It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words,
+            :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if
+            the sensor is updated by the physics or the environment stepping time-step and the sensor
+            is read by the environment stepping time-step.
+
+        Args:
+            dt: The time period since the contact was broken.
+            abs_tol: The absolute tolerance for the comparison.
+
+        Returns:
+            A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds.
+            Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor.
+
+        Raises:
+            RuntimeError: If the sensor is not configured to track contact time.
+        """
+        # check if the sensor is configured to track contact time
+        if not self.cfg.track_air_time:
+            raise RuntimeError(
+                "The contact sensor is not configured to track contact time."
+                "Please enable the 'track_air_time' in the sensor configuration."
+            )
+        # check if the bodies have broken contact
+        currently_detached = self.data.current_air_time > 0.0
+        less_than_dt_detached = self.data.current_air_time < (dt + abs_tol)
+        return currently_detached * less_than_dt_detached
+
+    """
+    Implementation - Abstract methods to be implemented by backend-specific subclasses.
+    """
+
+    def _initialize_impl(self):
+        """Initializes the sensor handles and internal buffers.
+
+        Subclasses should call ``super()._initialize_impl()`` first to initialize
+        the common sensor infrastructure from :class:`SensorBase`.
+        """
+        super()._initialize_impl()
+
+    @abstractmethod
+    def _update_buffers_impl(self, env_ids: Sequence[int]):
+        raise NotImplementedError
+
+    def _invalidate_initialize_callback(self, event):
+        """Invalidates the scene elements."""
+        super()._invalidate_initialize_callback(event)
diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py
new file mode 100644
index 00000000000..aeccf133033
--- /dev/null
+++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for contact sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +import torch + + +class BaseContactSensorData(ABC): + """Data container for the contact reporting sensor. + + This base class defines the interface for contact sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def pos_w(self) -> torch.Tensor | None: + """Position of the sensor origin in world frame. Shape is (N, 3). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def quat_w(self) -> torch.Tensor | None: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. + + Shape is (N, 4). None if :attr:`ContactSensorCfg.track_pose` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def net_forces_w(self) -> torch.Tensor | None: + """The net normal contact forces in world frame. Shape is (N, B, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def net_forces_w_history(self) -> torch.Tensor | None: + """History of net normal contact forces. Shape is (N, T, B, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def force_matrix_w(self) -> torch.Tensor | None: + """Normal contact forces filtered between sensor and filtered bodies. + + Shape is (N, B, M, 3). None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + @property + @abstractmethod + def force_matrix_w_history(self) -> torch.Tensor | None: + """History of filtered contact forces. Shape is (N, T, B, M, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + raise NotImplementedError + + @property + @abstractmethod + def contact_pos_w(self) -> torch.Tensor | None: + """Average position of contact points. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_contact_points` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def friction_forces_w(self) -> torch.Tensor | None: + """Sum of friction forces. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_friction_forces` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def last_air_time(self) -> torch.Tensor | None: + """Time spent in air before last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def current_air_time(self) -> torch.Tensor | None: + """Time spent in air since last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def last_contact_time(self) -> torch.Tensor | None: + """Time spent in contact before last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + raise NotImplementedError + + @property + @abstractmethod + def current_contact_time(self) -> torch.Tensor | None: + """Time spent in contact since last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. 
+ """ + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py index 2a85a2661f6..9ddaf15fa9d 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor.py @@ -3,537 +3,25 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Ignore optional memory usage warning globally -# pyright: reportOptionalSubscript=false - from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch - -import carb -import omni.physics.tensors.impl.api as physx -from isaacsim.core.simulation_manager import SimulationManager -from pxr import PhysxSchema - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import convert_quat +from isaaclab.utils.backend_utils import FactoryBase -from ..sensor_base import SensorBase -from .contact_sensor_data import ContactSensorData +from .base_contact_sensor import BaseContactSensor +from .base_contact_sensor_data import BaseContactSensorData if TYPE_CHECKING: - from .contact_sensor_cfg import ContactSensorCfg - - -class ContactSensor(SensorBase): - """A contact reporting sensor. - - The contact sensor reports the normal contact forces on a rigid body in the world frame. - It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. - - To enable the contact reporter on a rigid body, please make sure to enable the - :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your - asset spawner configuration. This will enable the contact reporter on all the rigid bodies - in the asset. - - The sensor can be configured to report the contact forces on a set of bodies with a given - filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful - when you want to report the contact forces between the sensor bodies and a specific set of - bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. - Please check the documentation on `RigidContact`_ for more details. - - The reporting of the filtered contact forces is only possible as one-to-many. This means that only one - sensor body in an environment can be filtered against multiple bodies in that environment. If you need to - filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor - body. - - As an example, suppose you want to report the contact forces for all the feet of a robot against an object - exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and - :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` - respectively will not work. Instead, you need to create a separate sensor for each foot and filter - it against the object. - - .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html - .. _RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView - """ - - cfg: ContactSensorCfg - """The configuration parameters.""" - - def __init__(self, cfg: ContactSensorCfg): - """Initializes the contact sensor object. 
- - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - - # Enable contact processing - carb_settings_iface = carb.settings.get_settings() - carb_settings_iface.set_bool("/physics/disableContactProcessing", False) - - # Create empty variables for storing output data - self._data: ContactSensorData = ContactSensorData() - # initialize self._body_physx_view for running in extension mode - self._body_physx_view = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Contact sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self.body_physx_view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of bodies : {self.num_bodies}\n" - f"\tbody names : {self.body_names}\n" - ) - - """ - Properties - """ - - @property - def num_instances(self) -> int: - return self.body_physx_view.count - - @property - def data(self) -> ContactSensorData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Number of bodies with contact sensors attached.""" - return self._num_bodies - - @property - def body_names(self) -> list[str]: - """Ordered names of bodies with contact sensors attached.""" - prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] - return [path.split("/")[-1] for path in prim_paths] - - @property - def body_physx_view(self) -> physx.RigidBodyView: - """View for the rigid bodies captured (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._body_physx_view - - @property - def contact_physx_view(self) -> physx.RigidContactView: - """Contact reporter view for the bodies (PhysX). - - Note: - Use this view with caution. It requires handling of tensors in a specific way. - """ - return self._contact_physx_view - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.net_forces_w[env_ids] = 0.0 - self._data.net_forces_w_history[env_ids] = 0.0 - # reset force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - self._data.force_matrix_w[env_ids] = 0.0 - self._data.force_matrix_w_history[env_ids] = 0.0 - # reset the current air time - if self.cfg.track_air_time: - self._data.current_air_time[env_ids] = 0.0 - self._data.last_air_time[env_ids] = 0.0 - self._data.current_contact_time[env_ids] = 0.0 - self._data.last_contact_time[env_ids] = 0.0 - # reset contact positions - if self.cfg.track_contact_points: - self._data.contact_pos_w[env_ids, :] = torch.nan - # reset friction forces - if self.cfg.track_friction_forces: - self._data.friction_forces_w[env_ids, :] = 0.0 - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. 
- """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) - - def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have established contact within the last :attr:`dt` seconds. - - This function checks if the bodies have established contact within the last :attr:`dt` seconds - by comparing the current contact time with the given time period. If the contact time is less - than the given time period, then the bodies are considered to be in contact. - - Note: - The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other - words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true - if the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contact was established. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have established contact within the last - :attr:`dt` seconds. Shape is (N, B), where N is the number of sensors and B is the - number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the bodies are in contact - currently_in_contact = self.data.current_contact_time > 0.0 - less_than_dt_in_contact = self.data.current_contact_time < (dt + abs_tol) - return currently_in_contact * less_than_dt_in_contact - - def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: - """Checks if bodies that have broken contact within the last :attr:`dt` seconds. - - This function checks if the bodies have broken contact within the last :attr:`dt` seconds - by comparing the current air time with the given time period. If the air time is less - than the given time period, then the bodies are considered to not be in contact. - - Note: - It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, - :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if - the sensor is updated by the physics or the environment stepping time-step and the sensor - is read by the environment stepping time-step. - - Args: - dt: The time period since the contract is broken. - abs_tol: The absolute tolerance for the comparison. - - Returns: - A boolean tensor indicating the bodies that have broken contact within the last :attr:`dt` seconds. - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Raises: - RuntimeError: If the sensor is not configured to track contact time. - """ - # check if the sensor is configured to track contact time - if not self.cfg.track_air_time: - raise RuntimeError( - "The contact sensor is not configured to track contact time." - "Please enable the 'track_air_time' in the sensor configuration." - ) - # check if the sensor is configured to track contact time - currently_detached = self.data.current_air_time > 0.0 - less_than_dt_detached = self.data.current_air_time < (dt + abs_tol) - return currently_detached * less_than_dt_detached - - """ - Implementation. 
- """ - - def _initialize_impl(self): - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check that only rigid bodies are selected - leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] - template_prim_path = self._parent_prims[0].GetPath().pathString - body_names = list() - for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): - # check if prim has contact reporter API - if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): - prim_path = prim.GetPath().pathString - body_names.append(prim_path.rsplit("/", 1)[-1]) - # check that there is at least one body with contact reporter API - if not body_names: - raise RuntimeError( - f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." - "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." - ) - - # construct regex expression for the body names - body_names_regex = r"(" + "|".join(body_names) + r")" - body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" - # convert regex expressions to glob expressions for PhysX - body_names_glob = body_names_regex.replace(".*", "*") - filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] - - # create a rigid prim view for the sensor - self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) - self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( - body_names_glob, - filter_patterns=filter_prim_paths_glob, - max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, - ) - # resolve the true count of bodies - self._num_bodies = self.body_physx_view.count // self._num_envs - # check that contact reporter succeeded - if self._num_bodies != len(body_names): - raise RuntimeError( - "Failed to initialize contact reporter for specified bodies." - f"\n\tInput prim path : {self.cfg.prim_path}" - f"\n\tResolved prim paths: {body_names_regex}" - ) - - # prepare data buffers - self._data.net_forces_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - # optional buffers - # -- history of net forces - if self.cfg.history_length > 0: - self._data.net_forces_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, 3, device=self._device - ) - else: - self._data.net_forces_w_history = self._data.net_forces_w.unsqueeze(1) - # -- pose of sensor origins - if self.cfg.track_pose: - self._data.pos_w = torch.zeros(self._num_envs, self._num_bodies, 3, device=self._device) - self._data.quat_w = torch.zeros(self._num_envs, self._num_bodies, 4, device=self._device) - - # check if filter paths are valid - if self.cfg.track_contact_points or self.cfg.track_friction_forces: - if len(self.cfg.filter_prim_paths_expr) == 0: - raise ValueError( - "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." - ) - if self.cfg.max_contact_data_count_per_prim < 1: - raise ValueError( - f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " - "Please set it to a value greater than 0 to track" - f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." 
- ) - - # -- position of contact points - if self.cfg.track_contact_points: - self._data.contact_pos_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - torch.nan, - device=self._device, - ) - # -- friction forces at contact points - if self.cfg.track_friction_forces: - self._data.friction_forces_w = torch.full( - (self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3), - 0.0, - device=self._device, - ) - # -- air/contact time between contacts - if self.cfg.track_air_time: - self._data.last_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_air_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.last_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - self._data.current_contact_time = torch.zeros(self._num_envs, self._num_bodies, device=self._device) - # force matrix: (num_envs, num_bodies, num_filter_shapes, 3) - # force matrix history: (num_envs, history_length, num_bodies, num_filter_shapes, 3) - if len(self.cfg.filter_prim_paths_expr) != 0: - num_filters = self.contact_physx_view.filter_count - self._data.force_matrix_w = torch.zeros( - self._num_envs, self._num_bodies, num_filters, 3, device=self._device - ) - if self.cfg.history_length > 0: - self._data.force_matrix_w_history = torch.zeros( - self._num_envs, self.cfg.history_length, self._num_bodies, num_filters, 3, device=self._device - ) - else: - self._data.force_matrix_w_history = self._data.force_matrix_w.unsqueeze(1) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - - # obtain the contact forces - # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). - # This isn't the most efficient way to do this, but it's the easiest to implement. 
- net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) - self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] - # update contact force history - if self.cfg.history_length > 0: - self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) - self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] - - # obtain the contact force matrix - if len(self.cfg.filter_prim_paths_expr) != 0: - # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) - num_filters = self.contact_physx_view.filter_count - # acquire and shape the force matrix - force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) - force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) - self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] - if self.cfg.history_length > 0: - self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) - self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] - - # obtain the pose of the sensor origin - if self.cfg.track_pose: - pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] - pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") - self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) - - # obtain contact points - if self.cfg.track_contact_points: - _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) - ) - self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( - buffer_contact_points, buffer_count, buffer_start_indices - )[env_ids] - - # obtain friction forces - if self.cfg.track_friction_forces: - friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( - dt=self._sim_physics_dt - ) - self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( - friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 - )[env_ids] - - # obtain the air time - if self.cfg.track_air_time: - # -- time elapsed since last update - # since this function is called every frame, we can use the difference to get the elapsed time - elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] - # -- check contact state of bodies - is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold - is_first_contact = (self._data.current_air_time[env_ids] > 0) * is_contact - is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact - # -- update the last contact time if body has just become in contact - self._data.last_air_time[env_ids] = torch.where( - is_first_contact, - self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_air_time[env_ids], - ) - # -- increment time for bodies that are not in contact - self._data.current_air_time[env_ids] = torch.where( - ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - # -- update the last contact time if body has just detached - self._data.last_contact_time[env_ids] = torch.where( - is_first_detached, - self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), - self._data.last_contact_time[env_ids], - ) - # -- increment time for bodies that are in contact - self._data.current_contact_time[env_ids] = 
torch.where( - is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 - ) - - def _unpack_contact_buffer_data( - self, - contact_data: torch.Tensor, - buffer_count: torch.Tensor, - buffer_start_indices: torch.Tensor, - avg: bool = True, - default: float = float("nan"), - ) -> torch.Tensor: - """ - Unpacks and aggregates contact data for each (env, body, filter) group. - - This function vectorizes the following nested loop: - - for i in range(self._num_bodies * self._num_envs): - for j in range(self.contact_physx_view.filter_count): - start_index_ij = buffer_start_indices[i, j] - count_ij = buffer_count[i, j] - self._contact_position_aggregate_buffer[i, j, :] = torch.mean( - contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 - ) - - For more details, see the `RigidContactView.get_contact_data() documentation `_. - - Args: - contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). - buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). - buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). - avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. - default: Default value to use for groups with zero contacts. Defaults to NaN. - - Returns: - Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). - """ - counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) - n_rows, total = counts.numel(), int(counts.sum()) - agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) - if total > 0: - row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) - - block_starts = counts.cumsum(0) - counts - deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) - flat_idx = starts[row_ids] + deltas - - pts = contact_data.index_select(0, flat_idx) - agg = agg.zero_().index_add_(0, row_ids, pts) - agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg - agg[counts == 0] = default - - return agg.view(self._num_envs * self.num_bodies, -1, 3).view( - self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 - ) + from isaaclab_physx.sensors.contact_sensor import ContactSensor as PhysXContactSensor + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. 
not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "contact_visualizer"): - self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.contact_visualizer.set_visibility(True) - else: - if hasattr(self, "contact_visualizer"): - self.contact_visualizer.set_visibility(False) - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self.body_physx_view is None: - return - # marker indices - # 0: contact, 1: no contact - net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) - marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) - # check if prim is visualized - if self.cfg.track_pose: - frame_origins: torch.Tensor = self._data.pos_w - else: - pose = self.body_physx_view.get_transforms() - frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] - # visualize - self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) +class ContactSensor(FactoryBase, BaseContactSensor): + """Factory for creating contact sensor instances.""" - """ - Internal simulation callbacks. - """ + data: BaseContactSensorData | PhysXContactSensorData - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._body_physx_view = None - self._contact_physx_view = None + def __new__(cls, *args, **kwargs) -> BaseContactSensor | PhysXContactSensor: + """Create a new instance of a contact sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index fd6c15ebe96..64fd537cb97 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -3,151 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause -# needed to import for allowing type-hinting: torch.Tensor | None -from __future__ import annotations +"""Re-exports the base contact sensor data class for backwards compatibility.""" -from dataclasses import dataclass +from .base_contact_sensor_data import BaseContactSensorData -import torch +# Re-export for backwards compatibility +ContactSensorData = BaseContactSensorData - -@dataclass -class ContactSensorData: - """Data container for the contact reporting sensor.""" - - pos_w: torch.Tensor | None = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - - """ - - contact_pos_w: torch.Tensor | None = None - """Average of the positions of contact points between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_contact_points` is False, then this quantity is None. 
- * If the :attr:`ContactSensorCfg.track_contact_points` is True, a ValueError will be raised if: - - * If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * If the :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - will not be calculated. - - """ - - friction_forces_w: torch.Tensor | None = None - """Sum of the friction forces between sensor body and filter prim in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Collision pairs not in contact will result in NaN. - - Note: - - * If the :attr:`ContactSensorCfg.track_friction_forces` is False, then this quantity is None. - * If the :attr:`ContactSensorCfg.track_friction_forces` is True, a ValueError will be raised if: - - * The :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. - * The :attr:`ContactSensorCfg.max_contact_data_per_prim` is not specified or less than 1. - - """ - - quat_w: torch.Tensor | None = None - """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. - - Shape is (N, 4), where N is the number of sensors. - - Note: - If the :attr:`ContactSensorCfg.track_pose` is False, then this quantity is None. - """ - - net_forces_w: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, B, 3), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - net_forces_w_history: torch.Tensor | None = None - """The net normal contact forces in world frame. - - Shape is (N, T, B, 3), where N is the number of sensors, T is the configured history length - and B is the number of bodies in each sensor. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - This quantity is the sum of the normal contact forces acting on the sensor bodies. It must not be confused - with the total contact forces acting on the sensor bodies (which also includes the tangential forces). - """ - - force_matrix_w: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, B, M, 3), where N is the number of sensors, B is number of bodies in each sensor - and M is the number of filtered bodies. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - force_matrix_w_history: torch.Tensor | None = None - """The normal contact forces filtered between the sensor bodies and filtered bodies in world frame. - - Shape is (N, T, B, M, 3), where N is the number of sensors, T is the configured history length, - B is number of bodies in each sensor and M is the number of filtered bodies. - - In the history dimension, the first index is the most recent and the last index is the oldest. - - Note: - If the :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty, then this quantity is None. - """ - - last_air_time: torch.Tensor | None = None - """Time spent (in s) in the air before the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. 
- """ - - current_air_time: torch.Tensor | None = None - """Time spent (in s) in the air since the last detach. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - last_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact before the last detach. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ - - current_contact_time: torch.Tensor | None = None - """Time spent (in s) in contact since the last contact. - - Shape is (N, B), where N is the number of sensors and B is the number of bodies in each sensor. - - Note: - If the :attr:`ContactSensorCfg.track_air_time` is False, then this quantity is None. - """ +__all__ = ["BaseContactSensorData", "ContactSensorData"] diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py index d5db853e8cc..938f1501298 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py @@ -5,6 +5,8 @@ """Sub-module for frame transformer sensor.""" +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData from .frame_transformer import FrameTransformer from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py new file mode 100644 index 00000000000..9ac1ef22c73 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py @@ -0,0 +1,124 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import isaaclab.utils.string as string_utils + +from ..sensor_base import SensorBase +from .base_frame_transformer_data import BaseFrameTransformerData + +if TYPE_CHECKING: + from .frame_transformer_cfg import FrameTransformerCfg + + +class BaseFrameTransformer(SensorBase): + """A sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. 
This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. + + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseFrameTransformerData: + raise NotImplementedError + + @property + @abstractmethod + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. + + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`len(data.target_frame_names)` to access the number of target frames. + """ + raise NotImplementedError + + @property + @abstractmethod + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`data.target_frame_names` to access the target frame names. + """ + raise NotImplementedError + + """ + Operations + """ + + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: + """Find bodies in the articulation based on the name keys. + + Args: + name_keys: A regular expression or a list of regular expressions to match the body names. + preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. + + Returns: + A tuple of lists containing the body indices and names. + """ + return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_ids: Sequence[int]): + raise NotImplementedError + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + super()._invalidate_initialize_callback(event) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py new file mode 100644 index 00000000000..4b63f8fe31b --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for frame transformer sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +import torch + + +class BaseFrameTransformerData(ABC): + """Data container for the frame transformer sensor. + + This base class defines the interface for frame transformer sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def target_frame_names(self) -> list[str]: + """Target frame names (order matches data ordering). + + Resolved from :attr:`FrameTransformerCfg.FrameCfg.name`. + """ + raise NotImplementedError + + @property + @abstractmethod + def target_pos_source(self) -> torch.Tensor: + """Position of the target frame(s) relative to source frame. Shape is (N, M, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def target_quat_source(self) -> torch.Tensor: + """Orientation of the target frame(s) relative to source frame (w, x, y, z). Shape is (N, M, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def target_pos_w(self) -> torch.Tensor: + """Position of the target frame(s) after offset in world frame. Shape is (N, M, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def target_quat_w(self) -> torch.Tensor: + """Orientation of the target frame(s) after offset in world frame (w, x, y, z). Shape is (N, M, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def source_pos_w(self) -> torch.Tensor: + """Position of the source frame after offset in world frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def source_quat_w(self) -> torch.Tensor: + """Orientation of the source frame after offset in world frame (w, x, y, z). Shape is (N, 4).""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py index ed83392b3aa..f01ce2e3f64 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer.py @@ -5,556 +5,23 @@ from __future__ import annotations -import logging -import re -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.string as string_utils -from isaaclab.markers import VisualizationMarkers -from isaaclab.utils.math import ( - combine_frame_transforms, - convert_quat, - is_identity_pose, - normalize, - quat_from_angle_axis, - subtract_frame_transforms, -) - -from ..sensor_base import SensorBase -from .frame_transformer_data import FrameTransformerData +from .base_frame_transformer import BaseFrameTransformer +from .base_frame_transformer_data import BaseFrameTransformerData if TYPE_CHECKING: - from .frame_transformer_cfg import FrameTransformerCfg - -# import logger -logger = logging.getLogger(__name__) - - -class FrameTransformer(SensorBase): - """A sensor for reporting frame transforms. - - This class provides an interface for reporting the transform of one or more frames (target frames) - with respect to another frame (source frame). 
The source frame is specified by the user as a prim path - (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of - prim paths (:attr:`FrameTransformerCfg.target_frames`). - - The source frame and target frames are assumed to be rigid bodies. The transform of the target frames - with respect to the source frame is computed by first extracting the transform of the source frame - and target frames from the physics engine and then computing the relative transform between the two. - - Additionally, the user can specify an offset for the source frame and each target frame. This is useful - for specifying the transform of the desired frame with respect to the body's center of mass, for instance. - - A common example of using this sensor is to track the position and orientation of the end effector of a - robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the - manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is - typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the - manipulator. - - """ - - cfg: FrameTransformerCfg - """The configuration parameters.""" - - def __init__(self, cfg: FrameTransformerCfg): - """Initializes the frame transformer object. - - Args: - cfg: The configuration parameters. - """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data: FrameTransformerData = FrameTransformerData() - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"FrameTransformer @ '{self.cfg.prim_path}': \n" - f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" - f"\tnumber of envs: {self._num_envs}\n" - f"\tsource body frame: {self._source_frame_body_name}\n" - f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> FrameTransformerData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_bodies(self) -> int: - """Returns the number of target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`len(data.target_frame_names)` to access the number of target frames. - """ - return len(self._target_frame_body_names) - - @property - def body_names(self) -> list[str]: - """Returns the names of the target bodies being tracked. - - Note: - This is an alias used for consistency with other sensors. Otherwise, we recommend using - :attr:`data.target_frame_names` to access the target frame names. - """ - return self._target_frame_body_names - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timers and counters - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = ... - - def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: - """Find bodies in the articulation based on the name keys. - - Args: - name_keys: A regular expression or a list of regular expressions to match the body names. - preserve_order: Whether to preserve the order of the name keys in the output. Defaults to False. - - Returns: - A tuple of lists containing the body indices and names. 
- """ - return string_utils.resolve_matching_names(name_keys, self._target_frame_names, preserve_order) - - """ - Implementation. - """ - - def _initialize_impl(self): - super()._initialize_impl() - - # resolve source frame offset - source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) - source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) - # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is - # not the identity quaternion for efficiency in _update_buffer_impl - self._apply_source_frame_offset = True - # Handle source frame offsets - if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): - logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") - self._apply_source_frame_offset = False - else: - logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") - # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) - self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) - self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) - - # Keep track of mapping from the rigid body name to the desired frames and prim path, - # as there may be multiple frames based upon the same body name and we don't want to - # create unnecessary views. - body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} - # The offsets associated with each target frame - target_offsets: dict[str, dict[str, torch.Tensor]] = {} - # The frames whose offsets are not identity - non_identity_offset_frames: list[str] = [] - - # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the - # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl - self._apply_target_frame_offset = False - - # Need to keep track of whether the source frame is also a target frame - self._source_is_also_target_frame = False - - # Collect all target frames, their associated body prim paths and their offsets so that we can extract - # the prim, check that it has the appropriate rigid body API in a single loop. - # First element is None because user can't specify source frame name - frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] - frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] - # First element is None because source frame offset is handled separately - frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] - frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) - for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): - # Find correct prim - matching_prims = sim_utils.find_matching_prims(prim_path) - if len(matching_prims) == 0: - raise ValueError( - f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." - " No matching prims were found." - ) - for prim in matching_prims: - # Get the prim path of the matching prim - matching_prim_path = prim.GetPath().pathString - # Check if it is a rigid prim - if not prim.HasAPI(UsdPhysics.RigidBodyAPI): - raise ValueError( - f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" - " rigid body. 
The class only supports transformations between rigid bodies." - ) - - # Get the name of the body: use relative prim path for unique identification - body_name = self._get_relative_body_path(matching_prim_path) - # Use leaf name of prim path if frame name isn't specified by user - frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] - - # Keep track of which frames are associated with which bodies - if body_name in body_names_to_frames: - body_names_to_frames[body_name]["frames"].add(frame_name) - - # This is a corner case where the source frame is also a target frame - if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": - self._source_is_also_target_frame = True - - else: - # Store the first matching prim path and the type of frame - body_names_to_frames[body_name] = { - "frames": {frame_name}, - "prim_path": matching_prim_path, - "type": frame_type, - } - - if offset is not None: - offset_pos = torch.tensor(offset.pos, device=self.device) - offset_quat = torch.tensor(offset.rot, device=self.device) - # Check if we need to apply offsets (optimized code path in _update_buffer_impl) - if not is_identity_pose(offset_pos, offset_quat): - non_identity_offset_frames.append(frame_name) - self._apply_target_frame_offset = True - target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} - - if not self._apply_target_frame_offset: - logger.info( - f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" - f" are identity: {frames[1:]}" - ) - else: - logger.info( - f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" - f" {non_identity_offset_frames}" - ) - - # The names of bodies that RigidPrim will be tracking to later extract transforms from - tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] - tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] - - body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] - - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # Create a prim view for all frames and initialize it - # order of transforms coming out of view will be source frame followed by target frame(s) - self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) - - # Determine the order in which regex evaluated body names so we can later index into frame transforms - # by frame name correctly - all_prim_paths = self._frame_physx_view.prim_paths - - if "env_" in all_prim_paths[0]: - - def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: - """Separates the environment number and prim_path from the item. - - Args: - item: The item to extract the environment number from. Assumes item is of the form - `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. - Returns: - The environment number and the prim_path. - """ - match = re.search(r"env_(\d+)(.*)", item) - return (int(match.group(1)), match.group(2)) - - # Find the indices that would reorganize output to be per environment. 
- # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base - # to come before env_1/Robot/foot so we need to use custom key function - self._per_env_indices = [ - index - for index, _ in sorted( - list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) - ) - ] - - # Only need 0th env as the names and their ordering are the same across environments - sorted_prim_paths = [ - all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] - ] - - else: - # If no environment is present, then the order of the body names is the same as the order of the - # prim paths sorted alphabetically - self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] - sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] - - # -- target frames: use relative prim path for unique identification - self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] - - # -- source frame: use relative prim path for unique identification - self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) - source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) - - # Only remove source frame from tracked bodies if it is not also a target frame - if not self._source_is_also_target_frame: - self._target_frame_body_names.remove(self._source_frame_body_name) - - # Determine indices into all tracked body frames for both source and target frames - all_ids = torch.arange(self._num_envs * len(tracked_body_names)) - self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index - - # If source frame is also a target frame, then the target frame body ids are the same as - # the source frame body ids - if self._source_is_also_target_frame: - self._target_frame_body_ids = all_ids - else: - self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] - - # The name of each of the target frame(s) - either user specified or defaulted to the body name - self._target_frame_names: list[str] = [] - # The position and rotation components of target frame offsets - target_frame_offset_pos = [] - target_frame_offset_quat = [] - # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed - # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations - # when updating sensor in _update_buffers_impl - duplicate_frame_indices = [] - - # Go through each body name and determine the number of duplicates we need for that frame - # and extract the offsets. This is all done to handle the case where multiple frames - # reference the same body, but have different names and/or offsets - for i, body_name in enumerate(self._target_frame_body_names): - for frame in body_names_to_frames[body_name]["frames"]: - # Only need to handle target frames here as source frame is handled separately - if frame in target_offsets: - target_frame_offset_pos.append(target_offsets[frame]["pos"]) - target_frame_offset_quat.append(target_offsets[frame]["quat"]) - self._target_frame_names.append(frame) - duplicate_frame_indices.append(i) - - # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes - # [0, 1, 1, 2, 3, 4, 4, 5]. 
Again, this is a optimization to make _update_buffer_impl more efficient - duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) - if self._source_is_also_target_frame: - num_target_body_frames = len(tracked_body_names) - else: - num_target_body_frames = len(tracked_body_names) - 1 - - self._duplicate_frame_indices = torch.cat( - [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] - ) - - # Target frame offsets are only applied if at least one of the offsets are non-identity - if self._apply_target_frame_offset: - # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) - self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) - self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) - - # fill the data buffer - self._data.target_frame_names = self._target_frame_names - self._data.source_pos_w = torch.zeros(self._num_envs, 3, device=self._device) - self._data.source_quat_w = torch.zeros(self._num_envs, 4, device=self._device) - self._data.target_pos_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 3, device=self._device) - self._data.target_quat_w = torch.zeros(self._num_envs, len(duplicate_frame_indices), 4, device=self._device) - self._data.target_pos_source = torch.zeros_like(self._data.target_pos_w) - self._data.target_quat_source = torch.zeros_like(self._data.target_quat_w) - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = ... - - # Extract transforms from view - shape is: - # (the total number of source and target body frames being tracked * self._num_envs, 7) - transforms = self._frame_physx_view.get_transforms() - - # Reorder the transforms to be per environment as is expected of SensorData - transforms = transforms[self._per_env_indices] - - # Convert quaternions as PhysX uses xyzw form - transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") - - # Process source frame transform - source_frames = transforms[self._source_frame_body_ids] - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_source_frame_offset: - source_pos_w, source_quat_w = combine_frame_transforms( - source_frames[:, :3], - source_frames[:, 3:], - self._source_frame_offset_pos, - self._source_frame_offset_quat, - ) - else: - source_pos_w = source_frames[:, :3] - source_quat_w = source_frames[:, 3:] - - # Process target frame transforms - target_frames = transforms[self._target_frame_body_ids] - duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] - duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] - - # Only apply offset if the offsets will result in a coordinate frame transform - if self._apply_target_frame_offset: - target_pos_w, target_quat_w = combine_frame_transforms( - duplicated_target_frame_pos_w, - duplicated_target_frame_quat_w, - self._target_frame_offset_pos, - self._target_frame_offset_quat, - ) - else: - target_pos_w = duplicated_target_frame_pos_w - target_quat_w = duplicated_target_frame_quat_w - - # Compute the transform of the target frame with respect to the source frame - total_num_frames = len(self._target_frame_names) - target_pos_source, target_quat_source = subtract_frame_transforms( - source_pos_w.unsqueeze(1).expand(-1, total_num_frames, 
-1).reshape(-1, 3), - source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), - target_pos_w, - target_quat_w, - ) - - # Update buffers - # note: The frame names / ordering don't change so no need to update them after initialization - self._data.source_pos_w[:] = source_pos_w.view(-1, 3) - self._data.source_quat_w[:] = source_quat_w.view(-1, 4) - self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) - self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) - self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) - self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) - - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. not their visibility - if debug_vis: - if not hasattr(self, "frame_visualizer"): - self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - - # set their visibility to true - self.frame_visualizer.set_visibility(True) - else: - if hasattr(self, "frame_visualizer"): - self.frame_visualizer.set_visibility(False) - - def _debug_vis_callback(self, event): - # Get the all frames pose - frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) - frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) - - # Get the all connecting lines between frames pose - lines_pos, lines_quat, lines_length = self._get_connecting_lines( - start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), - end_pos=self._data.target_pos_w.view(-1, 3), - ) - - # Initialize default (identity) scales and marker indices for all markers (frames + lines) - marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) - marker_indices = torch.zeros(marker_scales.size(0)) - - # Set the z-scale of line markers to represent their actual length - marker_scales[-lines_length.size(0) :, -1] = lines_length - - # Assign marker config index 1 to line markers - marker_indices[-lines_length.size(0) :] = 1 - - # Update the frame and the connecting line visualizer - self.frame_visualizer.visualize( - translations=torch.cat((frames_pos, lines_pos), dim=0), - orientations=torch.cat((frames_quat, lines_quat), dim=0), - scales=marker_scales, - marker_indices=marker_indices, - ) - - """ - Internal simulation callbacks. - """ - - def _invalidate_initialize_callback(self, event): - """Invalidates the scene elements.""" - # call parent - super()._invalidate_initialize_callback(event) - # set all existing views to None to invalidate them - self._frame_physx_view = None - - """ - Internal helpers. - """ - - def _get_connecting_lines( - self, start_pos: torch.Tensor, end_pos: torch.Tensor - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - """Draws connecting lines between frames. - - Given start and end points, this function computes the positions (mid-point), orientations, - and lengths of the connecting lines. - - Args: - start_pos: The start positions of the connecting lines. Shape is (N, 3). - end_pos: The end positions of the connecting lines. Shape is (N, 3). - - Returns: - A tuple containing: - - The positions of each connecting line. Shape is (N, 3). - - The orientations of each connecting line in quaternion. Shape is (N, 4). - - The lengths of each connecting line. Shape is (N,). 
- """ - direction = end_pos - start_pos - lengths = torch.norm(direction, dim=-1) - positions = (start_pos + end_pos) / 2 - - # Get default direction (along z-axis) - default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) - - # Normalize direction vector - direction_norm = normalize(direction) - - # Calculate rotation from default direction to target direction - rotation_axis = torch.linalg.cross(default_direction, direction_norm) - rotation_axis_norm = torch.norm(rotation_axis, dim=-1) - - # Handle case where vectors are parallel - mask = rotation_axis_norm > 1e-6 - rotation_axis = torch.where( - mask.unsqueeze(-1), - normalize(rotation_axis), - torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), - ) - - # Calculate rotation angle - cos_angle = torch.sum(default_direction * direction_norm, dim=-1) - cos_angle = torch.clamp(cos_angle, -1.0, 1.0) - angle = torch.acos(cos_angle) - orientations = quat_from_angle_axis(angle, rotation_axis) - - return positions, orientations, lengths - - @staticmethod - def _get_relative_body_path(prim_path: str) -> str: - """Extract a normalized body path from a prim path. + from isaaclab_physx.sensors.frame_transformer import FrameTransformer as PhysXFrameTransformer + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData - Removes the environment instance segment `/envs/env_/` to normalize paths - across multiple environments, while preserving the `/envs/` prefix to - distinguish environment-scoped paths from non-environment paths. - Examples: - - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' - - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' - - '/World/Robot' -> '/World/Robot' - - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' +class FrameTransformer(FactoryBase, BaseFrameTransformer): + """Factory for creating frame transformer instances.""" - Args: - prim_path: The full prim path. + data: BaseFrameTransformerData | PhysXFrameTransformerData - Returns: - The prim path with `/envs/env_/` removed, preserving `/envs/`. - """ - pattern = re.compile(r"/envs/env_[^/]+/") - return pattern.sub("/envs/", prim_path) + def __new__(cls, *args, **kwargs) -> BaseFrameTransformer | PhysXFrameTransformer: + """Create a new instance of a frame transformer based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index 942ddbd5144..2a9fa56516b 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -3,55 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause -from dataclasses import dataclass +"""Re-exports the base frame transformer data class for backwards compatibility.""" -import torch +from .base_frame_transformer_data import BaseFrameTransformerData +# Re-export for backwards compatibility +FrameTransformerData = BaseFrameTransformerData -@dataclass -class FrameTransformerData: - """Data container for the frame transformer sensor.""" - - target_frame_names: list[str] = None - """Target frame names (this denotes the order in which that frame data is ordered). - - The frame names are resolved from the :attr:`FrameTransformerCfg.FrameCfg.name` field. 
- This does not necessarily follow the order in which the frames are defined in the config due to - the regex matching. - """ - - target_pos_source: torch.Tensor = None - """Position of the target frame(s) relative to source frame. - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_source: torch.Tensor = None - """Orientation of the target frame(s) relative to source frame quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - target_pos_w: torch.Tensor = None - """Position of the target frame(s) after offset (in world frame). - - Shape is (N, M, 3), where N is the number of environments, and M is the number of target frames. - """ - - target_quat_w: torch.Tensor = None - """Orientation of the target frame(s) after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, M, 4), where N is the number of environments, and M is the number of target frames. - """ - - source_pos_w: torch.Tensor = None - """Position of the source frame after offset (in world frame). - - Shape is (N, 3), where N is the number of environments. - """ - - source_quat_w: torch.Tensor = None - """Orientation of the source frame after offset (in world frame) quaternion (w, x, y, z). - - Shape is (N, 4), where N is the number of environments. - """ +__all__ = ["BaseFrameTransformerData", "FrameTransformerData"] diff --git a/source/isaaclab/isaaclab/sensors/imu/__init__.py b/source/isaaclab/isaaclab/sensors/imu/__init__.py index 7501e41cf49..a16340919b3 100644 --- a/source/isaaclab/isaaclab/sensors/imu/__init__.py +++ b/source/isaaclab/isaaclab/sensors/imu/__init__.py @@ -7,6 +7,8 @@ Imu Sensor """ +from .base_imu import BaseImu +from .base_imu_data import BaseImuData from .imu import Imu from .imu_cfg import ImuCfg from .imu_data import ImuData diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu.py b/source/isaaclab/isaaclab/sensors/imu/base_imu.py new file mode 100644 index 00000000000..d3d87eb4937 --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu.py @@ -0,0 +1,86 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from abc import abstractmethod +from collections.abc import Sequence +from typing import TYPE_CHECKING + +from ..sensor_base import SensorBase +from .base_imu_data import BaseImuData + +if TYPE_CHECKING: + from .imu_cfg import ImuCfg + + +class BaseImu(SensorBase): + """The Inertial Measurement Unit (IMU) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame + linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular + accelerations/velocities. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + We are computing the accelerations using numerical differentiation from the velocities. Consequently, the + IMU sensor accuracy depends on the chosen physics timestep. For sufficient accuracy, we recommend keeping the + timestep at 200 Hz or higher. + + ..
note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation. The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. + + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "base" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the Imu sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + """ + Properties + """ + + @property + @abstractmethod + def data(self) -> BaseImuData: + raise NotImplementedError + + """ + Implementation - Abstract methods to be implemented by backend-specific subclasses. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + Subclasses should call ``super()._initialize_impl()`` first to initialize + the common sensor infrastructure from :class:`SensorBase`. + """ + super()._initialize_impl() + + @abstractmethod + def _update_buffers_impl(self, env_ids: Sequence[int]): + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py new file mode 100644 index 00000000000..0c4204f910e --- /dev/null +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -0,0 +1,62 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Base class for IMU sensor data containers.""" + +from __future__ import annotations + +from abc import ABC, abstractmethod + +import torch + + +class BaseImuData(ABC): + """Data container for the Imu sensor. + + This base class defines the interface for IMU sensor data. Backend-specific + implementations should inherit from this class and provide the actual data storage. + """ + + @property + @abstractmethod + def pos_w(self) -> torch.Tensor: + """Position of the sensor origin in world frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def quat_w(self) -> torch.Tensor: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4).""" + raise NotImplementedError + + @property + @abstractmethod + def projected_gravity_b(self) -> torch.Tensor: + """Gravity direction unit vector projected on the imu frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def lin_vel_b(self) -> torch.Tensor: + """IMU frame linear velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def ang_vel_b(self) -> torch.Tensor: + """IMU frame angular velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def lin_acc_b(self) -> torch.Tensor: + """IMU frame linear acceleration relative to the world expressed in IMU frame. 
Shape is (N, 3).""" + raise NotImplementedError + + @property + @abstractmethod + def ang_acc_b(self) -> torch.Tensor: + """IMU frame angular acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + raise NotImplementedError diff --git a/source/isaaclab/isaaclab/sensors/imu/imu.py b/source/isaaclab/isaaclab/sensors/imu/imu.py index e092b39502e..5fdc1c81453 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu.py @@ -5,295 +5,23 @@ from __future__ import annotations -from collections.abc import Sequence from typing import TYPE_CHECKING -import torch +from isaaclab.utils.backend_utils import FactoryBase -from isaacsim.core.simulation_manager import SimulationManager -from pxr import UsdGeom, UsdPhysics - -import isaaclab.sim as sim_utils -import isaaclab.utils.math as math_utils -from isaaclab.markers import VisualizationMarkers - -from ..sensor_base import SensorBase -from .imu_data import ImuData +from .base_imu import BaseImu +from .base_imu_data import BaseImuData if TYPE_CHECKING: - from .imu_cfg import ImuCfg - - -class Imu(SensorBase): - """The Inertia Measurement Unit (IMU) sensor. - - The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame - linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular - accelerations/velocities. - - If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. - The fixed transform from that ancestor to the target prim is computed once during initialization and - composed with the configured sensor offset. - - .. note:: - - We are computing the accelerations using numerical differentiation from the velocities. Consequently, the - IMU sensor accuracy depends on the chosen phsyx timestep. For a sufficient accuracy, we recommend to keep the - timestep at least as 200Hz. - - .. note:: - - The user can configure the sensor offset in the configuration file. The offset is applied relative to the - rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform - from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. - The offset is defined as a position vector and a quaternion rotation, which - are applied in the order: position, then rotation. The position is applied as a translation - in the body frame of the rigid source prim, and the rotation is applied as a rotation - in the body frame of the rigid source prim. - - """ - - cfg: ImuCfg - """The configuration parameters.""" - - def __init__(self, cfg: ImuCfg): - """Initializes the Imu sensor. - - Args: - cfg: The configuration parameters. 
- """ - # initialize base class - super().__init__(cfg) - # Create empty variables for storing output data - self._data = ImuData() - - # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) - self._rigid_parent_expr: str | None = None - - def __str__(self) -> str: - """Returns: A string containing information about the instance.""" - return ( - f"Imu sensor @ '{self.cfg.prim_path}': \n" - f"\tview type : {self._view.__class__}\n" - f"\tupdate period (s) : {self.cfg.update_period}\n" - f"\tnumber of sensors : {self._view.count}\n" - ) - - """ - Properties - """ - - @property - def data(self) -> ImuData: - # update sensors if needed - self._update_outdated_buffers() - # return the data - return self._data - - @property - def num_instances(self) -> int: - return self._view.count - - """ - Operations - """ - - def reset(self, env_ids: Sequence[int] | None = None): - # reset the timestamps - super().reset(env_ids) - # resolve None - if env_ids is None: - env_ids = slice(None) - # reset accumulative data buffers - self._data.pos_w[env_ids] = 0.0 - self._data.quat_w[env_ids] = 0.0 - self._data.quat_w[env_ids, 0] = 1.0 - self._data.projected_gravity_b[env_ids] = 0.0 - self._data.projected_gravity_b[env_ids, 2] = -1.0 - self._data.lin_vel_b[env_ids] = 0.0 - self._data.ang_vel_b[env_ids] = 0.0 - self._data.lin_acc_b[env_ids] = 0.0 - self._data.ang_acc_b[env_ids] = 0.0 - self._prev_lin_vel_w[env_ids] = 0.0 - self._prev_ang_vel_w[env_ids] = 0.0 - - def update(self, dt: float, force_recompute: bool = False): - # save timestamp - self._dt = dt - # execute updating - super().update(dt, force_recompute) - - """ - Implementation. - """ - - def _initialize_impl(self): - """Initializes the sensor handles and internal buffers. - - - If the target prim path is a rigid body, build the view directly on it. - - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor - to the target prim, and build the view on the ancestor expression. 
- """ - # Initialize parent class - super()._initialize_impl() - # obtain global simulation view - self._physics_sim_view = SimulationManager.get_physics_sim_view() - # check if the prim at path is a rigid prim - prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) - if prim is None: - raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") - - # Find the first matching ancestor prim that implements rigid body API - ancestor_prim = sim_utils.get_first_matching_ancestor_prim( - prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) - ) - if ancestor_prim is None: - raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") - # Convert ancestor prim path to expression - if ancestor_prim == prim: - self._rigid_parent_expr = self.cfg.prim_path - fixed_pos_b, fixed_quat_b = None, None - else: - # Convert ancestor prim path to expression - relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString - self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") - # Resolve the relative pose between the target prim and the ancestor prim - fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) - - # Create the rigid body view on the ancestor - self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) - - # Get world gravity - gravity = self._physics_sim_view.get_gravity() - gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) - self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) - - # Create internal buffers - self._initialize_buffers_impl() - - # Compose the configured offset with the fixed ancestor->target transform (done once) - # new_offset = fixed * cfg.offset - # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg - if fixed_pos_b is not None and fixed_quat_b is not None: - # Broadcast fixed transform across instances - fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) - fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) - - cfg_p = self._offset_pos_b.clone() - cfg_q = self._offset_quat_b.clone() - - composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) - composed_q = math_utils.quat_mul(fixed_q, cfg_q) - - self._offset_pos_b = composed_p - self._offset_quat_b = composed_q - - def _update_buffers_impl(self, env_ids: Sequence[int]): - """Fills the buffers of the sensor data.""" - - # default to all sensors - if len(env_ids) == self._num_envs: - env_ids = slice(None) - # world pose of the rigid source (ancestor) from the PhysX view - pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) - quat_w = quat_w.roll(1, dims=-1) - - # sensor pose in world: apply composed offset - self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) - self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) - - # COM of rigid source (body frame) - com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] - - # Velocities at rigid source COM - lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) - - # If sensor offset or COM != link origin, account for angular velocity contribution - lin_vel_w += torch.linalg.cross( - ang_vel_w, math_utils.quat_apply(quat_w, 
self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 - ) - - # numerical derivative (world frame) - lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] - ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt - - # batch rotate world->body using current sensor orientation - dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) - dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( - 5, dim=0 - ) - # store the velocities. - self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] - self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] - # store the accelerations - self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] - self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] - # store projected gravity - self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] - - self._prev_lin_vel_w[env_ids] = lin_vel_w - self._prev_ang_vel_w[env_ids] = ang_vel_w + from isaaclab_physx.sensors.imu import Imu as PhysXImu + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData - def _initialize_buffers_impl(self): - """Create buffers for storing data.""" - # data buffers - self._data.pos_w = torch.zeros(self._view.count, 3, device=self._device) - self._data.quat_w = torch.zeros(self._view.count, 4, device=self._device) - self._data.quat_w[:, 0] = 1.0 - self._data.projected_gravity_b = torch.zeros(self._view.count, 3, device=self._device) - self._data.lin_vel_b = torch.zeros_like(self._data.pos_w) - self._data.ang_vel_b = torch.zeros_like(self._data.pos_w) - self._data.lin_acc_b = torch.zeros_like(self._data.pos_w) - self._data.ang_acc_b = torch.zeros_like(self._data.pos_w) - self._prev_lin_vel_w = torch.zeros_like(self._data.pos_w) - self._prev_ang_vel_w = torch.zeros_like(self._data.pos_w) - # store sensor offset (applied relative to rigid source). - # This may be composed later with a fixed ancestor->target transform. - self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) - self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) - # set gravity bias - self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( - self._view.count, 1 - ) +class Imu(FactoryBase, BaseImu): + """Factory for creating IMU sensor instances.""" - def _set_debug_vis_impl(self, debug_vis: bool): - # set visibility of markers - # note: parent only deals with callbacks. 
not their visibility - if debug_vis: - # create markers if necessary for the first time - if not hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) - # set their visibility to true - self.acceleration_visualizer.set_visibility(True) - else: - if hasattr(self, "acceleration_visualizer"): - self.acceleration_visualizer.set_visibility(False) + data: BaseImuData | PhysXImuData - def _debug_vis_callback(self, event): - # safely return if view becomes invalid - # note: this invalidity happens because of isaac sim view callbacks - if self._view is None: - return - # get marker location - # -- base state - base_pos_w = self._data.pos_w.clone() - base_pos_w[:, 2] += 0.5 - # -- resolve the scales - default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale - arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) - # get up axis of current stage - up_axis = UsdGeom.GetStageUpAxis(self.stage) - # arrow-direction - quat_opengl = math_utils.quat_from_matrix( - math_utils.create_rotation_matrix_from_view( - self._data.pos_w, - self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), - up_axis=up_axis, - device=self._device, - ) - ) - quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") - # display markers - self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) + def __new__(cls, *args, **kwargs) -> BaseImu | PhysXImu: + """Create a new instance of an IMU sensor based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index dd06e09a8b7..a7a2fed703d 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -3,55 +3,11 @@ # # SPDX-License-Identifier: BSD-3-Clause -from __future__ import annotations +"""Re-exports the base IMU data class for backwards compatibility.""" -from dataclasses import dataclass +from .base_imu_data import BaseImuData -import torch +# Re-export for backwards compatibility +ImuData = BaseImuData - -@dataclass -class ImuData: - """Data container for the Imu sensor.""" - - pos_w: torch.Tensor = None - """Position of the sensor origin in world frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - quat_w: torch.Tensor = None - """Orientation of the sensor origin in quaternion ``(w, x, y, z)`` in world frame. - - Shape is (N, 4), where ``N`` is the number of environments. - """ - - projected_gravity_b: torch.Tensor = None - """Gravity direction unit vector projected on the imu frame. - - Shape is (N,3), where ``N`` is the number of environments. - """ - - lin_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - ang_vel_b: torch.Tensor = None - """IMU frame angular velocity relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - lin_acc_b: torch.Tensor = None - """IMU frame linear acceleration relative to the world expressed in IMU frame. - - Shape is (N, 3), where ``N`` is the number of environments. - """ - - ang_acc_b: torch.Tensor = None - """IMU frame angular acceleration relative to the world expressed in IMU frame. 
- - Shape is (N, 3), where ``N`` is the number of environments. - """ +__all__ = ["BaseImuData", "ImuData"] diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 39ed35abf14..08f3f939a6e 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -1,6 +1,38 @@ Changelog --------- +0.1.1 (2026-01-28) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.sensors` module containing PhysX-specific sensor implementations: + + * :class:`~isaaclab_physx.sensors.ContactSensor` and :class:`~isaaclab_physx.sensors.ContactSensorData`: + PhysX-specific implementation for contact force sensing. Extends + :class:`~isaaclab.sensors.contact_sensor.BaseContactSensor` with PhysX tensor API for contact + force queries, contact filtering, and contact point tracking. + + * :class:`~isaaclab_physx.sensors.Imu` and :class:`~isaaclab_physx.sensors.ImuData`: + PhysX-specific implementation for inertial measurement unit simulation. Extends + :class:`~isaaclab.sensors.imu.BaseImu` with PhysX rigid body views for velocity and + acceleration computation. + + * :class:`~isaaclab_physx.sensors.FrameTransformer` and :class:`~isaaclab_physx.sensors.FrameTransformerData`: + PhysX-specific implementation for coordinate frame transformations. Extends + :class:`~isaaclab.sensors.frame_transformer.BaseFrameTransformer` with PhysX rigid body views + for efficient frame pose queries. + +* Added PhysX-specific sensor tests moved from ``isaaclab/test/sensors/``: + + * ``test_contact_sensor.py`` + * ``test_imu.py`` + * ``test_frame_transformer.py`` + * ``check_contact_sensor.py`` + * ``check_imu_sensor.py`` + + 0.1.0 (2026-01-28) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_physx/isaaclab_physx/__init__.py b/source/isaaclab_physx/isaaclab_physx/__init__.py index 68a8a553287..69bbf740a59 100644 --- a/source/isaaclab_physx/isaaclab_physx/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/__init__.py @@ -17,3 +17,6 @@ # Configure the module-level variables __version__ = ISAACLAB_PHYSX_METADATA["package"]["version"] + +# Import sensors module for auto-registration with factory +from . import sensors # noqa: F401, E402 diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py new file mode 100644 index 00000000000..dde43d8ce3f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-package containing PhysX-specific sensor implementations.""" + +from .contact_sensor import ContactSensor, ContactSensorData +from .frame_transformer import FrameTransformer, FrameTransformerData +from .imu import Imu, ImuData + +__all__ = [ + "ContactSensor", + "ContactSensorData", + "FrameTransformer", + "FrameTransformerData", + "Imu", + "ImuData", +] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py new file mode 100644 index 00000000000..54dcd41658c --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
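For context on how the factory and re-export pattern above is consumed, here is a minimal usage sketch (not part of this patch). It assumes a running simulation and uses illustrative prim paths; ``ImuData`` stays importable as an alias of ``BaseImuData``, while constructing ``Imu(...)`` returns the backend-specific class selected by ``FactoryBase``:

    from isaaclab.sensors.imu import Imu, ImuCfg

    # Illustrative configuration; the prim path and update period are assumptions.
    imu_cfg = ImuCfg(prim_path="/World/envs/env_.*/Robot/base", update_period=0.005)

    # The factory's __new__ dispatches on the active backend, so the returned object
    # is an instance of the backend subclass (e.g. the PhysX Imu) while annotations
    # remain against the BaseImu / BaseImuData interface.
    imu = Imu(imu_cfg)

    # Body-frame readings exposed through BaseImuData, each of shape (N, 3).
    lin_acc_b = imu.data.lin_acc_b
    ang_vel_b = imu.data.ang_vel_b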
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX rigid contact sensor.""" + +from .contact_sensor import ContactSensor +from .contact_sensor_data import ContactSensorData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py new file mode 100644 index 00000000000..e5ebc6c8f84 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -0,0 +1,423 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Ignore optional memory usage warning globally +# pyright: reportOptionalSubscript=false + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +import carb +import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager +from pxr import PhysxSchema + +import isaaclab.sim as sim_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.contact_sensor import BaseContactSensor +from isaaclab.utils.math import convert_quat + +from .contact_sensor_data import ContactSensorData + +if TYPE_CHECKING: + from isaaclab.sensors.contact_sensor import ContactSensorCfg + + +class ContactSensor(BaseContactSensor): + """A PhysX contact reporting sensor. + + The contact sensor reports the normal contact forces on a rigid body in the world frame. + It relies on the `PhysX ContactReporter`_ API to be activated on the rigid bodies. + + To enable the contact reporter on a rigid body, please make sure to enable the + :attr:`isaaclab.sim.spawner.RigidObjectSpawnerCfg.activate_contact_sensors` on your + asset spawner configuration. This will enable the contact reporter on all the rigid bodies + in the asset. + + The sensor can be configured to report the contact forces on a set of bodies with a given + filter pattern using the :attr:`ContactSensorCfg.filter_prim_paths_expr`. This is useful + when you want to report the contact forces between the sensor bodies and a specific set of + bodies in the scene. The data can be accessed using the :attr:`ContactSensorData.force_matrix_w`. + Please check the documentation on `RigidContact`_ for more details. + + The reporting of the filtered contact forces is only possible as one-to-many. This means that only one + sensor body in an environment can be filtered against multiple bodies in that environment. If you need to + filter multiple sensor bodies against multiple bodies, you need to create separate sensors for each sensor + body. + + As an example, suppose you want to report the contact forces for all the feet of a robot against an object + exclusively. In that case, setting the :attr:`ContactSensorCfg.prim_path` and + :attr:`ContactSensorCfg.filter_prim_paths_expr` with ``{ENV_REGEX_NS}/Robot/.*_FOOT`` and ``{ENV_REGEX_NS}/Object`` + respectively will not work. Instead, you need to create a separate sensor for each foot and filter + it against the object. + + .. _PhysX ContactReporter: https://docs.omniverse.nvidia.com/kit/docs/omni_usd_schema_physics/104.2/class_physx_schema_physx_contact_report_a_p_i.html + .. 
_RigidContact: https://docs.isaacsim.omniverse.nvidia.com/latest/py/source/extensions/isaacsim.core.api/docs/index.html#isaacsim.core.api.sensors.RigidContactView + """ + + cfg: ContactSensorCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the contact sensor.""" + + def __init__(self, cfg: ContactSensorCfg): + """Initializes the contact sensor object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + + # Enable contact processing + carb_settings_iface = carb.settings.get_settings() + carb_settings_iface.set_bool("/physics/disableContactProcessing", False) + + # Create empty variables for storing output data + self._data: ContactSensorData = ContactSensorData() + # initialize self._body_physx_view for running in extension mode + self._body_physx_view = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Contact sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self.body_physx_view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of bodies : {self.num_bodies}\n" + f"\tbody names : {self.body_names}\n" + ) + + """ + Properties + """ + + @property + def num_instances(self) -> int: + return self.body_physx_view.count + + @property + def data(self) -> ContactSensorData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Number of bodies with contact sensors attached.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors attached.""" + prim_paths = self.body_physx_view.prim_paths[: self.num_bodies] + return [path.split("/")[-1] for path in prim_paths] + + @property + def body_physx_view(self) -> physx.RigidBodyView: + """View for the rigid bodies captured (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._body_physx_view + + @property + def contact_physx_view(self) -> physx.RigidContactView: + """Contact reporter view for the bodies (PhysX). + + Note: + Use this view with caution. It requires handling of tensors in a specific way. + """ + return self._contact_physx_view + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.net_forces_w[env_ids] = 0.0 + self._data.net_forces_w_history[env_ids] = 0.0 + # reset force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + self._data.force_matrix_w[env_ids] = 0.0 + self._data.force_matrix_w_history[env_ids] = 0.0 + # reset the current air time + if self.cfg.track_air_time: + self._data.current_air_time[env_ids] = 0.0 + self._data.last_air_time[env_ids] = 0.0 + self._data.current_contact_time[env_ids] = 0.0 + self._data.last_contact_time[env_ids] = 0.0 + # reset contact positions + if self.cfg.track_contact_points: + self._data.contact_pos_w[env_ids, :] = torch.nan + # reset friction forces + if self.cfg.track_friction_forces: + self._data.friction_forces_w[env_ids, :] = 0.0 + + """ + Implementation. 
+ """ + + def _initialize_impl(self): + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check that only rigid bodies are selected + leaf_pattern = self.cfg.prim_path.rsplit("/", 1)[-1] + template_prim_path = self._parent_prims[0].GetPath().pathString + body_names = list() + for prim in sim_utils.find_matching_prims(template_prim_path + "/" + leaf_pattern): + # check if prim has contact reporter API + if prim.HasAPI(PhysxSchema.PhysxContactReportAPI): + prim_path = prim.GetPath().pathString + body_names.append(prim_path.rsplit("/", 1)[-1]) + # check that there is at least one body with contact reporter API + if not body_names: + raise RuntimeError( + f"Sensor at path '{self.cfg.prim_path}' could not find any bodies with contact reporter API." + "\nHINT: Make sure to enable 'activate_contact_sensors' in the corresponding asset spawn configuration." + ) + + # construct regex expression for the body names + body_names_regex = r"(" + "|".join(body_names) + r")" + body_names_regex = f"{self.cfg.prim_path.rsplit('/', 1)[0]}/{body_names_regex}" + # convert regex expressions to glob expressions for PhysX + body_names_glob = body_names_regex.replace(".*", "*") + filter_prim_paths_glob = [expr.replace(".*", "*") for expr in self.cfg.filter_prim_paths_expr] + + # create a rigid prim view for the sensor + self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) + self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( + body_names_glob, + filter_patterns=filter_prim_paths_glob, + max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, + ) + # resolve the true count of bodies + self._num_bodies = self.body_physx_view.count // self._num_envs + # check that contact reporter succeeded + if self._num_bodies != len(body_names): + raise RuntimeError( + "Failed to initialize contact reporter for specified bodies." + f"\n\tInput prim path : {self.cfg.prim_path}" + f"\n\tResolved prim paths: {body_names_regex}" + ) + + # check if filter paths are valid + if self.cfg.track_contact_points or self.cfg.track_friction_forces: + if len(self.cfg.filter_prim_paths_expr) == 0: + raise ValueError( + "The 'filter_prim_paths_expr' is empty. Please specify a valid filter pattern to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." + ) + if self.cfg.max_contact_data_count_per_prim < 1: + raise ValueError( + f"The 'max_contact_data_count_per_prim' is {self.cfg.max_contact_data_count_per_prim}. " + "Please set it to a value greater than 0 to track" + f" {'contact points' if self.cfg.track_contact_points else 'friction forces'}." 
+ ) + + # prepare data buffers + num_filter_shapes = self.contact_physx_view.filter_count if len(self.cfg.filter_prim_paths_expr) != 0 else 0 + self._data.create_buffers( + num_envs=self._num_envs, + num_bodies=self._num_bodies, + num_filter_shapes=num_filter_shapes, + history_length=self.cfg.history_length, + track_pose=self.cfg.track_pose, + track_air_time=self.cfg.track_air_time, + track_contact_points=self.cfg.track_contact_points, + track_friction_forces=self.cfg.track_friction_forces, + device=self._device, + ) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + + # obtain the contact forces + # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). + # This isn't the most efficient way to do this, but it's the easiest to implement. + net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) + self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] + # update contact force history + if self.cfg.history_length > 0: + self._data.net_forces_w_history[env_ids] = self._data.net_forces_w_history[env_ids].roll(1, dims=1) + self._data.net_forces_w_history[env_ids, 0] = self._data.net_forces_w[env_ids] + + # obtain the contact force matrix + if len(self.cfg.filter_prim_paths_expr) != 0: + # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) + num_filters = self.contact_physx_view.filter_count + # acquire and shape the force matrix + force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) + force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) + self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] + if self.cfg.history_length > 0: + self._data.force_matrix_w_history[env_ids] = self._data.force_matrix_w_history[env_ids].roll(1, dims=1) + self._data.force_matrix_w_history[env_ids, 0] = self._data.force_matrix_w[env_ids] + + # obtain the pose of the sensor origin + if self.cfg.track_pose: + pose = self.body_physx_view.get_transforms().view(-1, self._num_bodies, 7)[env_ids] + pose[..., 3:] = convert_quat(pose[..., 3:], to="wxyz") + self._data.pos_w[env_ids], self._data.quat_w[env_ids] = pose.split([3, 4], dim=-1) + + # obtain contact points + if self.cfg.track_contact_points: + _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( + self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) + ) + self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( + buffer_contact_points, buffer_count, buffer_start_indices + )[env_ids] + + # obtain friction forces + if self.cfg.track_friction_forces: + friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( + dt=self._sim_physics_dt + ) + self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( + friction_forces, buffer_count, buffer_start_indices, avg=False, default=0.0 + )[env_ids] + + # obtain the air time + if self.cfg.track_air_time: + # -- time elapsed since last update + # since this function is called every frame, we can use the difference to get the elapsed time + elapsed_time = self._timestamp[env_ids] - self._timestamp_last_update[env_ids] + # -- check contact state of bodies + is_contact = torch.norm(self._data.net_forces_w[env_ids, :, :], dim=-1) > self.cfg.force_threshold + is_first_contact = 
(self._data.current_air_time[env_ids] > 0) * is_contact + is_first_detached = (self._data.current_contact_time[env_ids] > 0) * ~is_contact + # -- update the last contact time if body has just become in contact + self._data.last_air_time[env_ids] = torch.where( + is_first_contact, + self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_air_time[env_ids], + ) + # -- increment time for bodies that are not in contact + self._data.current_air_time[env_ids] = torch.where( + ~is_contact, self._data.current_air_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + # -- update the last contact time if body has just detached + self._data.last_contact_time[env_ids] = torch.where( + is_first_detached, + self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), + self._data.last_contact_time[env_ids], + ) + # -- increment time for bodies that are in contact + self._data.current_contact_time[env_ids] = torch.where( + is_contact, self._data.current_contact_time[env_ids] + elapsed_time.unsqueeze(-1), 0.0 + ) + + def _unpack_contact_buffer_data( + self, + contact_data: torch.Tensor, + buffer_count: torch.Tensor, + buffer_start_indices: torch.Tensor, + avg: bool = True, + default: float = float("nan"), + ) -> torch.Tensor: + """ + Unpacks and aggregates contact data for each (env, body, filter) group. + + This function vectorizes the following nested loop: + + for i in range(self._num_bodies * self._num_envs): + for j in range(self.contact_physx_view.filter_count): + start_index_ij = buffer_start_indices[i, j] + count_ij = buffer_count[i, j] + self._contact_position_aggregate_buffer[i, j, :] = torch.mean( + contact_data[start_index_ij : (start_index_ij + count_ij), :], dim=0 + ) + + For more details, see the `RigidContactView.get_contact_data() documentation `_. + + Args: + contact_data: Flat tensor of contact data, shape (N_envs * N_bodies, 3). + buffer_count: Number of contact points per (env, body, filter), shape (N_envs * N_bodies, N_filters). + buffer_start_indices: Start indices for each (env, body, filter), shape (N_envs * N_bodies, N_filters). + avg: If True, average the contact data for each group; if False, sum the data. Defaults to True. + default: Default value to use for groups with zero contacts. Defaults to NaN. + + Returns: + Aggregated contact data, shape (N_envs, N_bodies, N_filters, 3). + """ + counts, starts = buffer_count.view(-1), buffer_start_indices.view(-1) + n_rows, total = counts.numel(), int(counts.sum()) + agg = torch.full((n_rows, 3), default, device=self._device, dtype=contact_data.dtype) + if total > 0: + row_ids = torch.repeat_interleave(torch.arange(n_rows, device=self._device), counts) + + block_starts = counts.cumsum(0) - counts + deltas = torch.arange(row_ids.numel(), device=counts.device) - block_starts.repeat_interleave(counts) + flat_idx = starts[row_ids] + deltas + + pts = contact_data.index_select(0, flat_idx) + agg = agg.zero_().index_add_(0, row_ids, pts) + agg = agg / counts.clamp_min(1).unsqueeze(-1) if avg else agg + agg[counts == 0] = default + + return agg.view(self._num_envs * self.num_bodies, -1, 3).view( + self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. 
not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "contact_visualizer"): + self.contact_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.contact_visualizer.set_visibility(True) + else: + if hasattr(self, "contact_visualizer"): + self.contact_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self.body_physx_view is None: + return + # marker indices + # 0: contact, 1: no contact + net_contact_force_w = torch.norm(self._data.net_forces_w, dim=-1) + marker_indices = torch.where(net_contact_force_w > self.cfg.force_threshold, 0, 1) + # check if prim is visualized + if self.cfg.track_pose: + frame_origins: torch.Tensor = self._data.pos_w + else: + pose = self.body_physx_view.get_transforms() + frame_origins = pose.view(-1, self._num_bodies, 7)[:, :, :3] + # visualize + self.contact_visualizer.visualize(frame_origins.view(-1, 3), marker_indices=marker_indices.view(-1)) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._body_physx_view = None + self._contact_physx_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py new file mode 100644 index 00000000000..52d40ac1b09 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -0,0 +1,181 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +from isaaclab.sensors.contact_sensor import BaseContactSensorData + + +class ContactSensorData(BaseContactSensorData): + """Data container for the PhysX contact reporting sensor.""" + + @property + def pos_w(self) -> torch.Tensor | None: + """Position of the sensor origin in world frame. Shape is (N, 3). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + return self._pos_w + + @property + def quat_w(self) -> torch.Tensor | None: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4). + + None if :attr:`ContactSensorCfg.track_pose` is False. + """ + return self._quat_w + + @property + def net_forces_w(self) -> torch.Tensor | None: + """The net normal contact forces in world frame. Shape is (N, B, 3).""" + return self._net_forces_w + + @property + def net_forces_w_history(self) -> torch.Tensor | None: + """History of net normal contact forces. Shape is (N, T, B, 3).""" + return self._net_forces_w_history + + @property + def force_matrix_w(self) -> torch.Tensor | None: + """Normal contact forces filtered between sensor and filtered bodies. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. + """ + return self._force_matrix_w + + @property + def force_matrix_w_history(self) -> torch.Tensor | None: + """History of filtered contact forces. Shape is (N, T, B, M, 3). + + None if :attr:`ContactSensorCfg.filter_prim_paths_expr` is empty. 
+ """ + return self._force_matrix_w_history + + @property + def contact_pos_w(self) -> torch.Tensor | None: + """Average position of contact points. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_contact_points` is False. + """ + return self._contact_pos_w + + @property + def friction_forces_w(self) -> torch.Tensor | None: + """Sum of friction forces. Shape is (N, B, M, 3). + + None if :attr:`ContactSensorCfg.track_friction_forces` is False. + """ + return self._friction_forces_w + + @property + def last_air_time(self) -> torch.Tensor | None: + """Time spent in air before last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._last_air_time + + @property + def current_air_time(self) -> torch.Tensor | None: + """Time spent in air since last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._current_air_time + + @property + def last_contact_time(self) -> torch.Tensor | None: + """Time spent in contact before last detach. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._last_contact_time + + @property + def current_contact_time(self) -> torch.Tensor | None: + """Time spent in contact since last contact. Shape is (N, B). + + None if :attr:`ContactSensorCfg.track_air_time` is False. + """ + return self._current_contact_time + + def create_buffers( + self, + num_envs: int, + num_bodies: int, + num_filter_shapes: int, + history_length: int, + track_pose: bool, + track_air_time: bool, + track_contact_points: bool, + track_friction_forces: bool, + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_bodies: Number of bodies per environment. + num_filter_shapes: Number of filtered shapes for force matrix. + history_length: Length of force history buffer. + track_pose: Whether to track sensor pose. + track_air_time: Whether to track air/contact time. + track_contact_points: Whether to track contact points. + track_friction_forces: Whether to track friction forces. + device: Device for tensor storage. 
+ """ + # Net forces (always tracked) + self._net_forces_w = torch.zeros(num_envs, num_bodies, 3, device=device) + if history_length > 0: + self._net_forces_w_history = torch.zeros(num_envs, history_length, num_bodies, 3, device=device) + else: + self._net_forces_w_history = self._net_forces_w.unsqueeze(1) + + # Force matrix (optional - only with filter) + if num_filter_shapes > 0: + self._force_matrix_w = torch.zeros(num_envs, num_bodies, num_filter_shapes, 3, device=device) + if history_length > 0: + self._force_matrix_w_history = torch.zeros( + num_envs, history_length, num_bodies, num_filter_shapes, 3, device=device + ) + else: + self._force_matrix_w_history = self._force_matrix_w.unsqueeze(1) + else: + self._force_matrix_w = None + self._force_matrix_w_history = None + + # Pose tracking (optional) + if track_pose: + self._pos_w = torch.zeros(num_envs, num_bodies, 3, device=device) + self._quat_w = torch.zeros(num_envs, num_bodies, 4, device=device) + else: + self._pos_w = None + self._quat_w = None + + # Air/contact time tracking (optional) + if track_air_time: + self._last_air_time = torch.zeros(num_envs, num_bodies, device=device) + self._current_air_time = torch.zeros(num_envs, num_bodies, device=device) + self._last_contact_time = torch.zeros(num_envs, num_bodies, device=device) + self._current_contact_time = torch.zeros(num_envs, num_bodies, device=device) + else: + self._last_air_time = None + self._current_air_time = None + self._last_contact_time = None + self._current_contact_time = None + + # Contact points (optional) + if track_contact_points: + self._contact_pos_w = torch.full((num_envs, num_bodies, num_filter_shapes, 3), torch.nan, device=device) + else: + self._contact_pos_w = None + + # Friction forces (optional) + if track_friction_forces: + self._friction_forces_w = torch.zeros(num_envs, num_bodies, num_filter_shapes, 3, device=device) + else: + self._friction_forces_w = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py new file mode 100644 index 00000000000..e2b43f5d8d3 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX frame transformer sensor.""" + +from .frame_transformer import FrameTransformer +from .frame_transformer_data import FrameTransformerData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py new file mode 100644 index 00000000000..6892b033d67 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py @@ -0,0 +1,549 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import logging +import re +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdPhysics + +import isaaclab.sim as sim_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.frame_transformer import BaseFrameTransformer +from isaaclab.utils.math import ( + combine_frame_transforms, + convert_quat, + is_identity_pose, + normalize, + quat_from_angle_axis, + subtract_frame_transforms, +) + +from .frame_transformer_data import FrameTransformerData + +if TYPE_CHECKING: + from isaaclab.sensors.frame_transformer import FrameTransformerCfg + +# import logger +logger = logging.getLogger(__name__) + + +class FrameTransformer(BaseFrameTransformer): + """A PhysX sensor for reporting frame transforms. + + This class provides an interface for reporting the transform of one or more frames (target frames) + with respect to another frame (source frame). The source frame is specified by the user as a prim path + (:attr:`FrameTransformerCfg.prim_path`) and the target frames are specified by the user as a list of + prim paths (:attr:`FrameTransformerCfg.target_frames`). + + The source frame and target frames are assumed to be rigid bodies. The transform of the target frames + with respect to the source frame is computed by first extracting the transform of the source frame + and target frames from the physics engine and then computing the relative transform between the two. + + Additionally, the user can specify an offset for the source frame and each target frame. This is useful + for specifying the transform of the desired frame with respect to the body's center of mass, for instance. + + A common example of using this sensor is to track the position and orientation of the end effector of a + robotic manipulator. In this case, the source frame would be the body corresponding to the base frame of the + manipulator, and the target frame would be the body corresponding to the end effector. Since the end-effector is + typically a fictitious body, the user may need to specify an offset from the end-effector to the body of the + manipulator. + + """ + + cfg: FrameTransformerCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the frame transformer sensor.""" + + def __init__(self, cfg: FrameTransformerCfg): + """Initializes the frame transformer object. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data: FrameTransformerData = FrameTransformerData() + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"FrameTransformer @ '{self.cfg.prim_path}': \n" + f"\ttracked body frames: {[self._source_frame_body_name] + self._target_frame_body_names} \n" + f"\tnumber of envs: {self._num_envs}\n" + f"\tsource body frame: {self._source_frame_body_name}\n" + f"\ttarget frames (count: {self._target_frame_names}): {len(self._target_frame_names)}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> FrameTransformerData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_bodies(self) -> int: + """Returns the number of target bodies being tracked. 
+ + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`len(data.target_frame_names)` to access the number of target frames. + """ + return len(self._target_frame_body_names) + + @property + def body_names(self) -> list[str]: + """Returns the names of the target bodies being tracked. + + Note: + This is an alias used for consistency with other sensors. Otherwise, we recommend using + :attr:`data.target_frame_names` to access the target frame names. + """ + return self._target_frame_body_names + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timers and counters + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = ... + + """ + Implementation. + """ + + def _initialize_impl(self): + super()._initialize_impl() + + # resolve source frame offset + source_frame_offset_pos = torch.tensor(self.cfg.source_frame_offset.pos, device=self.device) + source_frame_offset_quat = torch.tensor(self.cfg.source_frame_offset.rot, device=self.device) + # Only need to perform offsetting of source frame if the position offsets is non-zero and rotation offset is + # not the identity quaternion for efficiency in _update_buffer_impl + self._apply_source_frame_offset = True + # Handle source frame offsets + if is_identity_pose(source_frame_offset_pos, source_frame_offset_quat): + logger.debug(f"No offset application needed for source frame as it is identity: {self.cfg.prim_path}") + self._apply_source_frame_offset = False + else: + logger.debug(f"Applying offset to source frame as it is not identity: {self.cfg.prim_path}") + # Store offsets as tensors (duplicating each env's offsets for ease of multiplication later) + self._source_frame_offset_pos = source_frame_offset_pos.unsqueeze(0).repeat(self._num_envs, 1) + self._source_frame_offset_quat = source_frame_offset_quat.unsqueeze(0).repeat(self._num_envs, 1) + + # Keep track of mapping from the rigid body name to the desired frames and prim path, + # as there may be multiple frames based upon the same body name and we don't want to + # create unnecessary views. + body_names_to_frames: dict[str, dict[str, set[str] | str]] = {} + # The offsets associated with each target frame + target_offsets: dict[str, dict[str, torch.Tensor]] = {} + # The frames whose offsets are not identity + non_identity_offset_frames: list[str] = [] + + # Only need to perform offsetting of target frame if any of the position offsets are non-zero or any of the + # rotation offsets are not the identity quaternion for efficiency in _update_buffer_impl + self._apply_target_frame_offset = False + + # Need to keep track of whether the source frame is also a target frame + self._source_is_also_target_frame = False + + # Collect all target frames, their associated body prim paths and their offsets so that we can extract + # the prim, check that it has the appropriate rigid body API in a single loop. 
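+        # The source frame is prepended to these lists below so that it is resolved and validated
+        # with the same rigid-body checks as the target frames.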
+ # First element is None because user can't specify source frame name + frames = [None] + [target_frame.name for target_frame in self.cfg.target_frames] + frame_prim_paths = [self.cfg.prim_path] + [target_frame.prim_path for target_frame in self.cfg.target_frames] + # First element is None because source frame offset is handled separately + frame_offsets = [None] + [target_frame.offset for target_frame in self.cfg.target_frames] + frame_types = ["source"] + ["target"] * len(self.cfg.target_frames) + for frame, prim_path, offset, frame_type in zip(frames, frame_prim_paths, frame_offsets, frame_types): + # Find correct prim + matching_prims = sim_utils.find_matching_prims(prim_path) + if len(matching_prims) == 0: + raise ValueError( + f"Failed to create frame transformer for frame '{frame}' with path '{prim_path}'." + " No matching prims were found." + ) + for prim in matching_prims: + # Get the prim path of the matching prim + matching_prim_path = prim.GetPath().pathString + # Check if it is a rigid prim + if not prim.HasAPI(UsdPhysics.RigidBodyAPI): + raise ValueError( + f"While resolving expression '{prim_path}' found a prim '{matching_prim_path}' which is not a" + " rigid body. The class only supports transformations between rigid bodies." + ) + + # Get the name of the body: use relative prim path for unique identification + body_name = self._get_relative_body_path(matching_prim_path) + # Use leaf name of prim path if frame name isn't specified by user + frame_name = frame if frame is not None else matching_prim_path.rsplit("/", 1)[-1] + + # Keep track of which frames are associated with which bodies + if body_name in body_names_to_frames: + body_names_to_frames[body_name]["frames"].add(frame_name) + + # This is a corner case where the source frame is also a target frame + if body_names_to_frames[body_name]["type"] == "source" and frame_type == "target": + self._source_is_also_target_frame = True + + else: + # Store the first matching prim path and the type of frame + body_names_to_frames[body_name] = { + "frames": {frame_name}, + "prim_path": matching_prim_path, + "type": frame_type, + } + + if offset is not None: + offset_pos = torch.tensor(offset.pos, device=self.device) + offset_quat = torch.tensor(offset.rot, device=self.device) + # Check if we need to apply offsets (optimized code path in _update_buffer_impl) + if not is_identity_pose(offset_pos, offset_quat): + non_identity_offset_frames.append(frame_name) + self._apply_target_frame_offset = True + target_offsets[frame_name] = {"pos": offset_pos, "quat": offset_quat} + + if not self._apply_target_frame_offset: + logger.info( + f"No offsets application needed from '{self.cfg.prim_path}' to target frames as all" + f" are identity: {frames[1:]}" + ) + else: + logger.info( + f"Offsets application needed from '{self.cfg.prim_path}' to the following target frames:" + f" {non_identity_offset_frames}" + ) + + # The names of bodies that RigidPrim will be tracking to later extract transforms from + tracked_prim_paths = [body_names_to_frames[body_name]["prim_path"] for body_name in body_names_to_frames.keys()] + tracked_body_names = [body_name for body_name in body_names_to_frames.keys()] + + body_names_regex = [tracked_prim_path.replace("env_0", "env_*") for tracked_prim_path in tracked_prim_paths] + + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # Create a prim view for all frames and initialize it + # order of transforms coming out of view will be source frame followed by target 
frame(s) + self._frame_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_regex) + + # Determine the order in which regex evaluated body names so we can later index into frame transforms + # by frame name correctly + all_prim_paths = self._frame_physx_view.prim_paths + + if "env_" in all_prim_paths[0]: + + def extract_env_num_and_prim_path(item: str) -> tuple[int, str]: + """Separates the environment number and prim_path from the item. + + Args: + item: The item to extract the environment number from. Assumes item is of the form + `/World/envs/env_1/blah` or `/World/envs/env_11/blah`. + Returns: + The environment number and the prim_path. + """ + match = re.search(r"env_(\d+)(.*)", item) + return (int(match.group(1)), match.group(2)) + + # Find the indices that would reorganize output to be per environment. + # We want `env_1/blah` to come before `env_11/blah` and env_1/Robot/base + # to come before env_1/Robot/foot so we need to use custom key function + self._per_env_indices = [ + index + for index, _ in sorted( + list(enumerate(all_prim_paths)), key=lambda x: extract_env_num_and_prim_path(x[1]) + ) + ] + + # Only need 0th env as the names and their ordering are the same across environments + sorted_prim_paths = [ + all_prim_paths[index] for index in self._per_env_indices if "env_0" in all_prim_paths[index] + ] + + else: + # If no environment is present, then the order of the body names is the same as the order of the + # prim paths sorted alphabetically + self._per_env_indices = [index for index, _ in sorted(enumerate(all_prim_paths), key=lambda x: x[1])] + sorted_prim_paths = [all_prim_paths[index] for index in self._per_env_indices] + + # -- target frames: use relative prim path for unique identification + self._target_frame_body_names = [self._get_relative_body_path(prim_path) for prim_path in sorted_prim_paths] + + # -- source frame: use relative prim path for unique identification + self._source_frame_body_name = self._get_relative_body_path(self.cfg.prim_path) + source_frame_index = self._target_frame_body_names.index(self._source_frame_body_name) + + # Only remove source frame from tracked bodies if it is not also a target frame + if not self._source_is_also_target_frame: + self._target_frame_body_names.remove(self._source_frame_body_name) + + # Determine indices into all tracked body frames for both source and target frames + all_ids = torch.arange(self._num_envs * len(tracked_body_names)) + self._source_frame_body_ids = torch.arange(self._num_envs) * len(tracked_body_names) + source_frame_index + + # If source frame is also a target frame, then the target frame body ids are the same as + # the source frame body ids + if self._source_is_also_target_frame: + self._target_frame_body_ids = all_ids + else: + self._target_frame_body_ids = all_ids[~torch.isin(all_ids, self._source_frame_body_ids)] + + # The name of each of the target frame(s) - either user specified or defaulted to the body name + self._target_frame_names: list[str] = [] + # The position and rotation components of target frame offsets + target_frame_offset_pos = [] + target_frame_offset_quat = [] + # Stores the indices of bodies that need to be duplicated. For instance, if body "LF_SHANK" is needed + # for 2 frames, this list enables us to duplicate the body to both frames when doing the calculations + # when updating sensor in _update_buffers_impl + duplicate_frame_indices = [] + + # Go through each body name and determine the number of duplicates we need for that frame + # and extract the offsets. 
This is all done to handle the case where multiple frames + # reference the same body, but have different names and/or offsets + for i, body_name in enumerate(self._target_frame_body_names): + for frame in body_names_to_frames[body_name]["frames"]: + # Only need to handle target frames here as source frame is handled separately + if frame in target_offsets: + target_frame_offset_pos.append(target_offsets[frame]["pos"]) + target_frame_offset_quat.append(target_offsets[frame]["quat"]) + self._target_frame_names.append(frame) + duplicate_frame_indices.append(i) + + # To handle multiple environments, need to expand so [0, 1, 1, 2] with 2 environments becomes + # [0, 1, 1, 2, 3, 4, 4, 5]. Again, this is a optimization to make _update_buffer_impl more efficient + duplicate_frame_indices = torch.tensor(duplicate_frame_indices, device=self.device) + if self._source_is_also_target_frame: + num_target_body_frames = len(tracked_body_names) + else: + num_target_body_frames = len(tracked_body_names) - 1 + + self._duplicate_frame_indices = torch.cat( + [duplicate_frame_indices + num_target_body_frames * env_num for env_num in range(self._num_envs)] + ) + + # Target frame offsets are only applied if at least one of the offsets are non-identity + if self._apply_target_frame_offset: + # Stack up all the frame offsets for shape (num_envs, num_frames, 3) and (num_envs, num_frames, 4) + self._target_frame_offset_pos = torch.stack(target_frame_offset_pos).repeat(self._num_envs, 1) + self._target_frame_offset_quat = torch.stack(target_frame_offset_quat).repeat(self._num_envs, 1) + + # Create data buffers + self._data.create_buffers( + num_envs=self._num_envs, + num_target_frames=len(duplicate_frame_indices), + target_frame_names=self._target_frame_names, + device=self._device, + ) + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = ... 
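+            # note: `...` (Ellipsis) acts as a full slice here, so subsequent tensor indexing with
+            # `env_ids` selects every environment without allocating an index tensor.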
+ + # Extract transforms from view - shape is: + # (the total number of source and target body frames being tracked * self._num_envs, 7) + transforms = self._frame_physx_view.get_transforms() + + # Reorder the transforms to be per environment as is expected of SensorData + transforms = transforms[self._per_env_indices] + + # Convert quaternions as PhysX uses xyzw form + transforms[:, 3:] = convert_quat(transforms[:, 3:], to="wxyz") + + # Process source frame transform + source_frames = transforms[self._source_frame_body_ids] + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_source_frame_offset: + source_pos_w, source_quat_w = combine_frame_transforms( + source_frames[:, :3], + source_frames[:, 3:], + self._source_frame_offset_pos, + self._source_frame_offset_quat, + ) + else: + source_pos_w = source_frames[:, :3] + source_quat_w = source_frames[:, 3:] + + # Process target frame transforms + target_frames = transforms[self._target_frame_body_ids] + duplicated_target_frame_pos_w = target_frames[self._duplicate_frame_indices, :3] + duplicated_target_frame_quat_w = target_frames[self._duplicate_frame_indices, 3:] + + # Only apply offset if the offsets will result in a coordinate frame transform + if self._apply_target_frame_offset: + target_pos_w, target_quat_w = combine_frame_transforms( + duplicated_target_frame_pos_w, + duplicated_target_frame_quat_w, + self._target_frame_offset_pos, + self._target_frame_offset_quat, + ) + else: + target_pos_w = duplicated_target_frame_pos_w + target_quat_w = duplicated_target_frame_quat_w + + # Compute the transform of the target frame with respect to the source frame + total_num_frames = len(self._target_frame_names) + target_pos_source, target_quat_source = subtract_frame_transforms( + source_pos_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 3), + source_quat_w.unsqueeze(1).expand(-1, total_num_frames, -1).reshape(-1, 4), + target_pos_w, + target_quat_w, + ) + + # Update buffers + # note: The frame names / ordering don't change so no need to update them after initialization + self._data.source_pos_w[:] = source_pos_w.view(-1, 3) + self._data.source_quat_w[:] = source_quat_w.view(-1, 4) + self._data.target_pos_w[:] = target_pos_w.view(-1, total_num_frames, 3) + self._data.target_quat_w[:] = target_quat_w.view(-1, total_num_frames, 4) + self._data.target_pos_source[:] = target_pos_source.view(-1, total_num_frames, 3) + self._data.target_quat_source[:] = target_quat_source.view(-1, total_num_frames, 4) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. 
not their visibility + if debug_vis: + if not hasattr(self, "frame_visualizer"): + self.frame_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + + # set their visibility to true + self.frame_visualizer.set_visibility(True) + else: + if hasattr(self, "frame_visualizer"): + self.frame_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # Get the all frames pose + frames_pos = torch.cat([self._data.source_pos_w, self._data.target_pos_w.view(-1, 3)], dim=0) + frames_quat = torch.cat([self._data.source_quat_w, self._data.target_quat_w.view(-1, 4)], dim=0) + + # Get the all connecting lines between frames pose + lines_pos, lines_quat, lines_length = self._get_connecting_lines( + start_pos=self._data.source_pos_w.repeat_interleave(self._data.target_pos_w.size(1), dim=0), + end_pos=self._data.target_pos_w.view(-1, 3), + ) + + # Initialize default (identity) scales and marker indices for all markers (frames + lines) + marker_scales = torch.ones(frames_pos.size(0) + lines_pos.size(0), 3) + marker_indices = torch.zeros(marker_scales.size(0)) + + # Set the z-scale of line markers to represent their actual length + marker_scales[-lines_length.size(0) :, -1] = lines_length + + # Assign marker config index 1 to line markers + marker_indices[-lines_length.size(0) :] = 1 + + # Update the frame and the connecting line visualizer + self.frame_visualizer.visualize( + translations=torch.cat((frames_pos, lines_pos), dim=0), + orientations=torch.cat((frames_quat, lines_quat), dim=0), + scales=marker_scales, + marker_indices=marker_indices, + ) + + """ + Internal simulation callbacks. + """ + + def _invalidate_initialize_callback(self, event): + """Invalidates the scene elements.""" + # call parent + super()._invalidate_initialize_callback(event) + # set all existing views to None to invalidate them + self._frame_physx_view = None + + """ + Internal helpers. + """ + + def _get_connecting_lines( + self, start_pos: torch.Tensor, end_pos: torch.Tensor + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Draws connecting lines between frames. + + Given start and end points, this function computes the positions (mid-point), orientations, + and lengths of the connecting lines. + + Args: + start_pos: The start positions of the connecting lines. Shape is (N, 3). + end_pos: The end positions of the connecting lines. Shape is (N, 3). + + Returns: + A tuple containing: + - The positions of each connecting line. Shape is (N, 3). + - The orientations of each connecting line in quaternion. Shape is (N, 4). + - The lengths of each connecting line. Shape is (N,). 
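+
+        Note:
+            The returned orientation is the rotation that maps the world z-axis onto each line's
+            direction; parallel and anti-parallel directions fall back to a rotation about the x-axis.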
+ """ + direction = end_pos - start_pos + lengths = torch.norm(direction, dim=-1) + positions = (start_pos + end_pos) / 2 + + # Get default direction (along z-axis) + default_direction = torch.tensor([0.0, 0.0, 1.0], device=self.device).expand(start_pos.size(0), -1) + + # Normalize direction vector + direction_norm = normalize(direction) + + # Calculate rotation from default direction to target direction + rotation_axis = torch.linalg.cross(default_direction, direction_norm) + rotation_axis_norm = torch.norm(rotation_axis, dim=-1) + + # Handle case where vectors are parallel + mask = rotation_axis_norm > 1e-6 + rotation_axis = torch.where( + mask.unsqueeze(-1), + normalize(rotation_axis), + torch.tensor([1.0, 0.0, 0.0], device=self.device).expand(start_pos.size(0), -1), + ) + + # Calculate rotation angle + cos_angle = torch.sum(default_direction * direction_norm, dim=-1) + cos_angle = torch.clamp(cos_angle, -1.0, 1.0) + angle = torch.acos(cos_angle) + orientations = quat_from_angle_axis(angle, rotation_axis) + + return positions, orientations, lengths + + @staticmethod + def _get_relative_body_path(prim_path: str) -> str: + """Extract a normalized body path from a prim path. + + Removes the environment instance segment `/envs/env_/` to normalize paths + across multiple environments, while preserving the `/envs/` prefix to + distinguish environment-scoped paths from non-environment paths. + + Examples: + - '/World/envs/env_0/Robot/torso' -> '/World/envs/Robot/torso' + - '/World/envs/env_123/Robot/left_hand' -> '/World/envs/Robot/left_hand' + - '/World/Robot' -> '/World/Robot' + - '/World/Robot_2/left_hand' -> '/World/Robot_2/left_hand' + + Args: + prim_path: The full prim path. + + Returns: + The prim path with `/envs/env_/` removed, preserving `/envs/`. + """ + pattern = re.compile(r"/envs/env_[^/]+/") + return pattern.sub("/envs/", prim_path) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py new file mode 100644 index 00000000000..59c476cb204 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py @@ -0,0 +1,72 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +from isaaclab.sensors.frame_transformer import BaseFrameTransformerData + + +class FrameTransformerData(BaseFrameTransformerData): + """Data container for the PhysX frame transformer sensor.""" + + @property + def target_frame_names(self) -> list[str]: + """Target frame names (order matches data ordering).""" + return self._target_frame_names + + @property + def target_pos_source(self) -> torch.Tensor: + """Position of target frame(s) relative to source frame. Shape is (N, M, 3).""" + return self._target_pos_source + + @property + def target_quat_source(self) -> torch.Tensor: + """Orientation of target frame(s) relative to source frame (w, x, y, z). Shape is (N, M, 4).""" + return self._target_quat_source + + @property + def target_pos_w(self) -> torch.Tensor: + """Position of target frame(s) after offset in world frame. Shape is (N, M, 3).""" + return self._target_pos_w + + @property + def target_quat_w(self) -> torch.Tensor: + """Orientation of target frame(s) after offset in world frame (w, x, y, z). 
Shape is (N, M, 4).""" + return self._target_quat_w + + @property + def source_pos_w(self) -> torch.Tensor: + """Position of source frame after offset in world frame. Shape is (N, 3).""" + return self._source_pos_w + + @property + def source_quat_w(self) -> torch.Tensor: + """Orientation of source frame after offset in world frame (w, x, y, z). Shape is (N, 4).""" + return self._source_quat_w + + def create_buffers( + self, + num_envs: int, + num_target_frames: int, + target_frame_names: list[str], + device: str, + ) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + num_target_frames: Number of target frames. + target_frame_names: Names of target frames. + device: Device for tensor storage. + """ + self._target_frame_names = target_frame_names + self._source_pos_w = torch.zeros(num_envs, 3, device=device) + self._source_quat_w = torch.zeros(num_envs, 4, device=device) + self._target_pos_w = torch.zeros(num_envs, num_target_frames, 3, device=device) + self._target_quat_w = torch.zeros(num_envs, num_target_frames, 4, device=device) + self._target_pos_source = torch.zeros(num_envs, num_target_frames, 3, device=device) + self._target_quat_source = torch.zeros(num_envs, num_target_frames, 4, device=device) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py new file mode 100644 index 00000000000..474e88f9e29 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Sub-module for PhysX IMU sensor.""" + +from .imu import Imu +from .imu_data import ImuData diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py new file mode 100644 index 00000000000..a9d6a3e1e11 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu.py @@ -0,0 +1,297 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import torch + +from isaacsim.core.simulation_manager import SimulationManager +from pxr import UsdGeom, UsdPhysics + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.markers import VisualizationMarkers +from isaaclab.sensors.imu import BaseImu + +from .imu_data import ImuData + +if TYPE_CHECKING: + from isaaclab.sensors.imu import ImuCfg + + +class Imu(BaseImu): + """The PhysX Inertia Measurement Unit (IMU) sensor. + + The sensor can be attached to any prim path with a rigid ancestor in its tree and produces body-frame + linear acceleration and angular velocity, along with world-frame pose and body-frame linear and angular + accelerations/velocities. + + If the provided path is not a rigid body, the closest rigid-body ancestor is used for simulation queries. + The fixed transform from that ancestor to the target prim is computed once during initialization and + composed with the configured sensor offset. + + .. note:: + + We are computing the accelerations using numerical differentiation from the velocities. 
Consequently, the + IMU sensor accuracy depends on the chosen phsyx timestep. For a sufficient accuracy, we recommend to keep the + timestep at least as 200Hz. + + .. note:: + + The user can configure the sensor offset in the configuration file. The offset is applied relative to the + rigid source prim. If the target prim is not a rigid body, the offset is composed with the fixed transform + from the rigid ancestor to the target prim. The offset is applied in the body frame of the rigid source prim. + The offset is defined as a position vector and a quaternion rotation, which + are applied in the order: position, then rotation. The position is applied as a translation + in the body frame of the rigid source prim, and the rotation is applied as a rotation + in the body frame of the rigid source prim. + + """ + + cfg: ImuCfg + """The configuration parameters.""" + + __backend_name__: str = "physx" + """The name of the backend for the IMU sensor.""" + + def __init__(self, cfg: ImuCfg): + """Initializes the Imu sensor. + + Args: + cfg: The configuration parameters. + """ + # initialize base class + super().__init__(cfg) + # Create empty variables for storing output data + self._data = ImuData() + + # Internal: expression used to build the rigid body view (may be different from cfg.prim_path) + self._rigid_parent_expr: str | None = None + + def __str__(self) -> str: + """Returns: A string containing information about the instance.""" + return ( + f"Imu sensor @ '{self.cfg.prim_path}': \n" + f"\tview type : {self._view.__class__}\n" + f"\tupdate period (s) : {self.cfg.update_period}\n" + f"\tnumber of sensors : {self._view.count}\n" + ) + + """ + Properties + """ + + @property + def data(self) -> ImuData: + # update sensors if needed + self._update_outdated_buffers() + # return the data + return self._data + + @property + def num_instances(self) -> int: + return self._view.count + + """ + Operations + """ + + def reset(self, env_ids: Sequence[int] | None = None): + # reset the timestamps + super().reset(env_ids) + # resolve None + if env_ids is None: + env_ids = slice(None) + # reset accumulative data buffers + self._data.pos_w[env_ids] = 0.0 + self._data.quat_w[env_ids] = 0.0 + self._data.quat_w[env_ids, 0] = 1.0 + self._data.projected_gravity_b[env_ids] = 0.0 + self._data.projected_gravity_b[env_ids, 2] = -1.0 + self._data.lin_vel_b[env_ids] = 0.0 + self._data.ang_vel_b[env_ids] = 0.0 + self._data.lin_acc_b[env_ids] = 0.0 + self._data.ang_acc_b[env_ids] = 0.0 + self._prev_lin_vel_w[env_ids] = 0.0 + self._prev_ang_vel_w[env_ids] = 0.0 + + def update(self, dt: float, force_recompute: bool = False): + # save timestamp + self._dt = dt + # execute updating + super().update(dt, force_recompute) + + """ + Implementation. + """ + + def _initialize_impl(self): + """Initializes the sensor handles and internal buffers. + + - If the target prim path is a rigid body, build the view directly on it. + - Otherwise find the closest rigid-body ancestor, cache the fixed transform from that ancestor + to the target prim, and build the view on the ancestor expression. 
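+        - The configured sensor offset is composed once with this fixed ancestor-to-target transform,
+          so per-step updates only apply a single combined offset.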
+ """ + # Initialize parent class + super()._initialize_impl() + # obtain global simulation view + self._physics_sim_view = SimulationManager.get_physics_sim_view() + # check if the prim at path is a rigid prim + prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + if prim is None: + raise RuntimeError(f"Failed to find a prim at path expression: {self.cfg.prim_path}") + + # Find the first matching ancestor prim that implements rigid body API + ancestor_prim = sim_utils.get_first_matching_ancestor_prim( + prim.GetPath(), predicate=lambda _prim: _prim.HasAPI(UsdPhysics.RigidBodyAPI) + ) + if ancestor_prim is None: + raise RuntimeError(f"Failed to find a rigid body ancestor prim at path expression: {self.cfg.prim_path}") + # Convert ancestor prim path to expression + if ancestor_prim == prim: + self._rigid_parent_expr = self.cfg.prim_path + fixed_pos_b, fixed_quat_b = None, None + else: + # Convert ancestor prim path to expression + relative_path = prim.GetPath().MakeRelativePath(ancestor_prim.GetPath()).pathString + self._rigid_parent_expr = self.cfg.prim_path.replace(relative_path, "") + # Resolve the relative pose between the target prim and the ancestor prim + fixed_pos_b, fixed_quat_b = sim_utils.resolve_prim_pose(prim, ancestor_prim) + + # Create the rigid body view on the ancestor + self._view = self._physics_sim_view.create_rigid_body_view(self._rigid_parent_expr.replace(".*", "*")) + + # Get world gravity + gravity = self._physics_sim_view.get_gravity() + gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) + gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, 1) + + # Create internal buffers + self._initialize_buffers_impl() + + # Compose the configured offset with the fixed ancestor->target transform (done once) + # new_offset = fixed * cfg.offset + # where composition is: p = p_fixed + R_fixed * p_cfg, q = q_fixed * q_cfg + if fixed_pos_b is not None and fixed_quat_b is not None: + # Broadcast fixed transform across instances + fixed_p = torch.tensor(fixed_pos_b, device=self._device).repeat(self._view.count, 1) + fixed_q = torch.tensor(fixed_quat_b, device=self._device).repeat(self._view.count, 1) + + cfg_p = self._offset_pos_b.clone() + cfg_q = self._offset_quat_b.clone() + + composed_p = fixed_p + math_utils.quat_apply(fixed_q, cfg_p) + composed_q = math_utils.quat_mul(fixed_q, cfg_q) + + self._offset_pos_b = composed_p + self._offset_quat_b = composed_q + + def _update_buffers_impl(self, env_ids: Sequence[int]): + """Fills the buffers of the sensor data.""" + + # default to all sensors + if len(env_ids) == self._num_envs: + env_ids = slice(None) + # world pose of the rigid source (ancestor) from the PhysX view + pos_w, quat_w = self._view.get_transforms()[env_ids].split([3, 4], dim=-1) + quat_w = quat_w.roll(1, dims=-1) + + # sensor pose in world: apply composed offset + self._data.pos_w[env_ids] = pos_w + math_utils.quat_apply(quat_w, self._offset_pos_b[env_ids]) + self._data.quat_w[env_ids] = math_utils.quat_mul(quat_w, self._offset_quat_b[env_ids]) + + # COM of rigid source (body frame) + com_pos_b = self._view.get_coms().to(self.device).split([3, 4], dim=-1)[0] + + # Velocities at rigid source COM + lin_vel_w, ang_vel_w = self._view.get_velocities()[env_ids].split([3, 3], dim=-1) + + # If sensor offset or COM != link origin, account for angular velocity contribution + lin_vel_w += torch.linalg.cross( + ang_vel_w, math_utils.quat_apply(quat_w, 
self._offset_pos_b[env_ids] - com_pos_b[env_ids]), dim=-1 + ) + + # numerical derivative (world frame) + lin_acc_w = (lin_vel_w - self._prev_lin_vel_w[env_ids]) / self._dt + self._gravity_bias_w[env_ids] + ang_acc_w = (ang_vel_w - self._prev_ang_vel_w[env_ids]) / self._dt + + # batch rotate world->body using current sensor orientation + dynamics_data = torch.stack((lin_vel_w, ang_vel_w, lin_acc_w, ang_acc_w, self.GRAVITY_VEC_W[env_ids]), dim=0) + dynamics_data_rot = math_utils.quat_apply_inverse(self._data.quat_w[env_ids].repeat(5, 1), dynamics_data).chunk( + 5, dim=0 + ) + # store the velocities. + self._data.lin_vel_b[env_ids] = dynamics_data_rot[0] + self._data.ang_vel_b[env_ids] = dynamics_data_rot[1] + # store the accelerations + self._data.lin_acc_b[env_ids] = dynamics_data_rot[2] + self._data.ang_acc_b[env_ids] = dynamics_data_rot[3] + # store projected gravity + self._data.projected_gravity_b[env_ids] = dynamics_data_rot[4] + + self._prev_lin_vel_w[env_ids] = lin_vel_w + self._prev_ang_vel_w[env_ids] = ang_vel_w + + def _initialize_buffers_impl(self): + """Create buffers for storing data.""" + # Create data buffers via data class + self._data.create_buffers(num_envs=self._view.count, device=self._device) + + # Sensor-internal buffers for velocity tracking (not exposed via data) + self._prev_lin_vel_w = torch.zeros(self._view.count, 3, device=self._device) + self._prev_ang_vel_w = torch.zeros(self._view.count, 3, device=self._device) + + # Store sensor offset (applied relative to rigid source). + # This may be composed later with a fixed ancestor->target transform. + self._offset_pos_b = torch.tensor(list(self.cfg.offset.pos), device=self._device).repeat(self._view.count, 1) + self._offset_quat_b = torch.tensor(list(self.cfg.offset.rot), device=self._device).repeat(self._view.count, 1) + # Set gravity bias + self._gravity_bias_w = torch.tensor(list(self.cfg.gravity_bias), device=self._device).repeat( + self._view.count, 1 + ) + + def _set_debug_vis_impl(self, debug_vis: bool): + # set visibility of markers + # note: parent only deals with callbacks. 
not their visibility + if debug_vis: + # create markers if necessary for the first time + if not hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer = VisualizationMarkers(self.cfg.visualizer_cfg) + # set their visibility to true + self.acceleration_visualizer.set_visibility(True) + else: + if hasattr(self, "acceleration_visualizer"): + self.acceleration_visualizer.set_visibility(False) + + def _debug_vis_callback(self, event): + # safely return if view becomes invalid + # note: this invalidity happens because of isaac sim view callbacks + if self._view is None: + return + # get marker location + # -- base state + base_pos_w = self._data.pos_w.clone() + base_pos_w[:, 2] += 0.5 + # -- resolve the scales + default_scale = self.acceleration_visualizer.cfg.markers["arrow"].scale + arrow_scale = torch.tensor(default_scale, device=self.device).repeat(self._data.lin_acc_b.shape[0], 1) + # get up axis of current stage + up_axis = UsdGeom.GetStageUpAxis(self.stage) + # arrow-direction + quat_opengl = math_utils.quat_from_matrix( + math_utils.create_rotation_matrix_from_view( + self._data.pos_w, + self._data.pos_w + math_utils.quat_apply(self._data.quat_w, self._data.lin_acc_b), + up_axis=up_axis, + device=self._device, + ) + ) + quat_w = math_utils.convert_camera_frame_orientation_convention(quat_opengl, "opengl", "world") + # display markers + self.acceleration_visualizer.visualize(base_pos_w, quat_w, arrow_scale) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py new file mode 100644 index 00000000000..18de813ac5f --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py @@ -0,0 +1,65 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +from isaaclab.sensors.imu import BaseImuData + + +class ImuData(BaseImuData): + """Data container for the PhysX Imu sensor.""" + + @property + def pos_w(self) -> torch.Tensor: + """Position of the sensor origin in world frame. Shape is (N, 3).""" + return self._pos_w + + @property + def quat_w(self) -> torch.Tensor: + """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4).""" + return self._quat_w + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity direction unit vector projected on the imu frame. Shape is (N, 3).""" + return self._projected_gravity_b + + @property + def lin_vel_b(self) -> torch.Tensor: + """IMU frame linear velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._lin_vel_b + + @property + def ang_vel_b(self) -> torch.Tensor: + """IMU frame angular velocity relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._ang_vel_b + + @property + def lin_acc_b(self) -> torch.Tensor: + """IMU frame linear acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._lin_acc_b + + @property + def ang_acc_b(self) -> torch.Tensor: + """IMU frame angular acceleration relative to the world expressed in IMU frame. Shape is (N, 3).""" + return self._ang_acc_b + + def create_buffers(self, num_envs: int, device: str) -> None: + """Create internal buffers for sensor data. + + Args: + num_envs: Number of environments. + device: Device for tensor storage. 
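+
+        Note:
+            The orientation buffer is initialized to the identity quaternion (w = 1); all other
+            buffers start at zero.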
+ """ + self._pos_w = torch.zeros(num_envs, 3, device=device) + self._quat_w = torch.zeros(num_envs, 4, device=device) + self._quat_w[:, 0] = 1.0 + self._projected_gravity_b = torch.zeros(num_envs, 3, device=device) + self._lin_vel_b = torch.zeros(num_envs, 3, device=device) + self._ang_vel_b = torch.zeros(num_envs, 3, device=device) + self._lin_acc_b = torch.zeros(num_envs, 3, device=device) + self._ang_acc_b = torch.zeros(num_envs, 3, device=device) diff --git a/source/isaaclab/test/sensors/check_contact_sensor.py b/source/isaaclab_physx/test/sensors/check_contact_sensor.py similarity index 100% rename from source/isaaclab/test/sensors/check_contact_sensor.py rename to source/isaaclab_physx/test/sensors/check_contact_sensor.py diff --git a/source/isaaclab/test/sensors/check_imu_sensor.py b/source/isaaclab_physx/test/sensors/check_imu_sensor.py similarity index 100% rename from source/isaaclab/test/sensors/check_imu_sensor.py rename to source/isaaclab_physx/test/sensors/check_imu_sensor.py diff --git a/source/isaaclab/test/sensors/test_contact_sensor.py b/source/isaaclab_physx/test/sensors/test_contact_sensor.py similarity index 100% rename from source/isaaclab/test/sensors/test_contact_sensor.py rename to source/isaaclab_physx/test/sensors/test_contact_sensor.py diff --git a/source/isaaclab/test/sensors/test_frame_transformer.py b/source/isaaclab_physx/test/sensors/test_frame_transformer.py similarity index 100% rename from source/isaaclab/test/sensors/test_frame_transformer.py rename to source/isaaclab_physx/test/sensors/test_frame_transformer.py diff --git a/source/isaaclab/test/sensors/test_imu.py b/source/isaaclab_physx/test/sensors/test_imu.py similarity index 100% rename from source/isaaclab/test/sensors/test_imu.py rename to source/isaaclab_physx/test/sensors/test_imu.py diff --git a/source/isaaclab/test/sensors/urdfs/simple_2_link.urdf b/source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf similarity index 100% rename from source/isaaclab/test/sensors/urdfs/simple_2_link.urdf rename to source/isaaclab_physx/test/sensors/urdfs/simple_2_link.urdf From 28f62c930cde22027cf30e424d5075afa9598946 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 11:00:19 +0100 Subject: [PATCH 20/38] Iterating. 
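Docstring notes are switched to Sphinx ``.. note::`` directives so they render as
admonitions, and the base sensor interfaces gain combined pose accessors
(``pose_w``, ``target_pose_source``, ``target_pose_w``, ``source_pose_w``) alongside a
backend-agnostic ``contact_view`` alias. The combined accessors pack a position and a
wxyz quaternion into a single tensor; a minimal, self-contained sketch of that layout
(tensor sizes here are illustrative only, not part of this patch):

    import torch

    num_envs = 4
    pos_w = torch.zeros(num_envs, 3)             # (N, 3) positions in the world frame
    quat_w = torch.zeros(num_envs, 4)            # (N, 4) quaternions, w component first
    quat_w[:, 0] = 1.0                           # identity rotation
    pose_w = torch.cat([pos_w, quat_w], dim=-1)  # (N, 7) combined pose, matching `pose_w`
    assert pose_w.shape == (num_envs, 7)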
--- .../assets/articulation/base_articulation.py | 9 +++++---- .../articulation/base_articulation_data.py | 2 +- source/isaaclab/isaaclab/assets/asset_base.py | 6 +++--- .../assets/rigid_object/base_rigid_object.py | 4 ++-- .../rigid_object/base_rigid_object_data.py | 2 +- .../base_rigid_object_collection.py | 4 ++-- .../base_rigid_object_collection_data.py | 2 +- .../isaaclab/isaaclab/sensors/camera/camera.py | 2 +- .../isaaclab/sensors/camera/camera_cfg.py | 2 +- .../contact_sensor/base_contact_sensor.py | 16 ++++++++++++++-- .../contact_sensor/base_contact_sensor_data.py | 6 ++++++ .../base_frame_transformer.py | 4 ++-- .../base_frame_transformer_data.py | 18 ++++++++++++++++++ .../frame_transformer/frame_transformer_cfg.py | 2 +- .../ray_caster/patterns/patterns_cfg.py | 4 ++-- .../sensors/ray_caster/ray_caster_cfg.py | 4 ++-- .../isaaclab/isaaclab/sensors/sensor_base.py | 4 ++-- .../deformable_object/deformable_object.py | 6 +++--- .../deformable_object/deformable_object_cfg.py | 2 +- .../assets/rigid_object/rigid_object.py | 4 ++-- .../rigid_object_collection.py | 6 +++--- .../assets/surface_gripper/surface_gripper.py | 8 ++++---- .../sensors/contact_sensor/contact_sensor.py | 6 +++--- .../contact_sensor/contact_sensor_data.py | 5 +++++ .../frame_transformer/frame_transformer.py | 4 ++-- .../frame_transformer_data.py | 15 +++++++++++++++ 26 files changed, 102 insertions(+), 45 deletions(-) diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py index a614705ed3d..6d75d2d32cb 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation.py @@ -166,7 +166,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -203,7 +203,7 @@ def write_data_to_sim(self) -> None: If any explicit actuators are present, then the actuator models are used to compute the joint commands. Otherwise, the joint commands are directly set into the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ @@ -1107,7 +1107,7 @@ def _apply_actuator_model(self) -> None: def _validate_cfg(self) -> None: """Validate the configuration after processing. - Note: + .. note:: This function should be called only after the configuration has been processed and the buffers have been created. Otherwise, some settings that are altered during processing may not be validated. For instance, the actuator models may change the joint max velocity limits. @@ -1118,7 +1118,8 @@ def _validate_cfg(self) -> None: def _log_articulation_info(self) -> None: """Log information about the articulation. - Note: We purposefully read the values from the simulator to ensure that the values are configured as expected. + .. note:: + We purposefully read the values from the simulator to ensure that the values are configured as expected. 
""" raise NotImplementedError() diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 323692df243..5e306e1180f 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -590,7 +590,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances,). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ diff --git a/source/isaaclab/isaaclab/assets/asset_base.py b/source/isaaclab/isaaclab/assets/asset_base.py index 158b6993351..8a7fecedbb8 100644 --- a/source/isaaclab/isaaclab/assets/asset_base.py +++ b/source/isaaclab/isaaclab/assets/asset_base.py @@ -155,7 +155,7 @@ def set_visibility(self, visible: bool, env_ids: Sequence[int] | None = None): It is useful for toggling the visibility of the asset in the simulator. For instance, one can hide the asset when it is not being used to reduce the rendering overhead. - Note: + .. note:: This operation uses the PXR API to set the visibility of the prims. Thus, the operation may have an overhead if the number of prims is large. @@ -304,7 +304,7 @@ def safe_callback(callback_name, event, obj_ref): def _initialize_callback(self, event): """Initializes the scene elements. - Note: + .. note:: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ @@ -334,7 +334,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. """ if prim_path == "/": diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py index 7308bceea92..34ca3c4ad7e 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object.py @@ -89,7 +89,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -123,7 +123,7 @@ def reset(self, env_ids: Sequence[int] | None = None) -> None: def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 4d8a16151a6..849e648de57 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -288,7 +288,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances,). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. 
""" diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py index 42f504070e8..e7814154fbd 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection.py @@ -87,7 +87,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the rigid object collection. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ raise NotImplementedError() @@ -122,7 +122,7 @@ def reset(self, env_ids: Sequence[int] | None = None, object_ids: slice | torch. def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ diff --git a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py index 8f1ed6f6f4a..5ecad670dbb 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object_collection/base_rigid_object_collection_data.py @@ -217,7 +217,7 @@ def projected_gravity_b(self) -> torch.Tensor: def heading_w(self) -> torch.Tensor: """Yaw heading of the base frame (in radians). Shape is (num_instances, num_bodies). - Note: + .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ diff --git a/source/isaaclab/isaaclab/sensors/camera/camera.py b/source/isaaclab/isaaclab/sensors/camera/camera.py index 26fd0713c71..22cea78fb4f 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera.py @@ -577,7 +577,7 @@ def _update_intrinsic_matrices(self, env_ids: Sequence[int]): Also called calibration matrix. This matrix works for linear depth images. We assume square pixels. - Note: + .. note:: The calibration matrix projects points in the 3D scene onto an imaginary screen of the camera. The coordinates of points on the image plane are in the homogeneous representation. """ diff --git a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py index 8fd9f307d18..b3b11d182df 100644 --- a/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py +++ b/source/isaaclab/isaaclab/sensors/camera/camera_cfg.py @@ -42,7 +42,7 @@ class OffsetCfg: offset: OffsetCfg = OffsetCfg() """The offset pose of the sensor's frame from the sensor's parent frame. Defaults to identity. - Note: + .. note:: The parent frame is the frame the sensor attaches to. For example, the parent frame of a camera at path ``/World/envs/env_0/Robot/Camera`` is ``/World/envs/env_0/Robot``. 
""" diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py index d32631bba5e..a37339158d0 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py @@ -84,6 +84,17 @@ def body_names(self) -> list[str]: """Ordered names of bodies with contact sensors attached.""" raise NotImplementedError + @property + @abstractmethod + def contact_view(self): + """View for the contact reporting. + + .. note:: + None if there is no view associated with the sensor. + """ + raise NotImplementedError + + """ Operations """ @@ -107,7 +118,7 @@ def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Ten by comparing the current contact time with the given time period. If the contact time is less than the given time period, then the bodies are considered to be in contact. - Note: + .. note:: The function assumes that :attr:`dt` is a factor of the sensor update time-step. In other words :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if the sensor is updated by the physics or the environment stepping time-step and the sensor @@ -143,7 +154,7 @@ def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: by comparing the current air time with the given time period. If the air time is less than the given time period, then the bodies are considered to not be in contact. - Note: + .. note:: It assumes that :attr:`dt` is a factor of the sensor update time-step. In other words, :math:`dt / dt_sensor = n`, where :math:`n` is a natural number. This is always true if the sensor is updated by the physics or the environment stepping time-step and the sensor @@ -175,6 +186,7 @@ def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: Implementation - Abstract methods to be implemented by backend-specific subclasses. """ + @abstractmethod def _initialize_impl(self): """Initializes the sensor handles and internal buffers. diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py index aeccf133033..0675a01a703 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -19,6 +19,12 @@ class BaseContactSensorData(ABC): implementations should inherit from this class and provide the actual data storage. """ + @property + @abstractmethod + def pose_w(self) -> torch.Tensor | None: + """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + raise NotImplementedError + @property @abstractmethod def pos_w(self) -> torch.Tensor | None: diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py index 9ac1ef22c73..78b6b545635 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer.py @@ -70,7 +70,7 @@ def data(self) -> BaseFrameTransformerData: def num_bodies(self) -> int: """Returns the number of target bodies being tracked. - Note: + .. note:: This is an alias used for consistency with other sensors. 
Otherwise, we recommend using :attr:`len(data.target_frame_names)` to access the number of target frames. """ @@ -81,7 +81,7 @@ def num_bodies(self) -> int: def body_names(self) -> list[str]: """Returns the names of the target bodies being tracked. - Note: + .. note:: This is an alias used for consistency with other sensors. Otherwise, we recommend using :attr:`data.target_frame_names` to access the target frame names. """ diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py index 4b63f8fe31b..b8d5a4d2af3 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/base_frame_transformer_data.py @@ -28,6 +28,12 @@ def target_frame_names(self) -> list[str]: """ raise NotImplementedError + @property + @abstractmethod + def target_pose_source(self) -> list[int]: + """Pose of the target frame(s) relative to source frame. Shape is (N, M, 7). Quaternion in wxyz order.""" + raise NotImplementedError + @property @abstractmethod def target_pos_source(self) -> torch.Tensor: @@ -40,6 +46,12 @@ def target_quat_source(self) -> torch.Tensor: """Orientation of the target frame(s) relative to source frame (w, x, y, z). Shape is (N, M, 4).""" raise NotImplementedError + @property + @abstractmethod + def target_pose_w(self) -> torch.Tensor: + """Pose of the target frame(s) after offset in world frame. Shape is (N, M, 7). Quaternion in wxyz order.""" + raise NotImplementedError + @property @abstractmethod def target_pos_w(self) -> torch.Tensor: @@ -52,6 +64,12 @@ def target_quat_w(self) -> torch.Tensor: """Orientation of the target frame(s) after offset in world frame (w, x, y, z). Shape is (N, M, 4).""" raise NotImplementedError + @property + @abstractmethod + def source_pose_w(self) -> torch.Tensor: + """Pose of the source frame after offset in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + raise NotImplementedError + @property @abstractmethod def source_pos_w(self) -> torch.Tensor: diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py index ca9b528aa1d..b42cb274b3e 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_cfg.py @@ -71,6 +71,6 @@ class FrameCfg: visualizer_cfg: VisualizationMarkersCfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameTransformer") """The configuration object for the visualization markers. Defaults to FRAME_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py index f50ba272b70..aa946ffac7b 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/patterns/patterns_cfg.py @@ -92,7 +92,7 @@ class PinholeCameraPatternCfg(PatternBaseCfg): Emulates sensor/film width on a camera. - Note: + .. note:: The default value is the horizontal aperture of a 35 mm spherical projector. """ vertical_aperture: float | None = None @@ -194,7 +194,7 @@ class BpearlPatternCfg(PatternBaseCfg): # fmt: on """Vertical ray angles (in degrees). Defaults to a list of 32 angles. 
- Note: + .. note:: We manually set the vertical ray angles to match the Bpearl sensor. The ray-angles are not evenly spaced. """ diff --git a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py index dbdebfad3a5..70107d5d6cd 100644 --- a/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py +++ b/source/isaaclab/isaaclab/sensors/ray_caster/ray_caster_cfg.py @@ -35,7 +35,7 @@ class OffsetCfg: mesh_prim_paths: list[str] = MISSING """The list of mesh primitive paths to ray cast against. - Note: + .. note:: Currently, only a single static mesh is supported. We are working on supporting multiple static meshes and dynamic meshes. """ @@ -93,6 +93,6 @@ class OffsetCfg: visualizer_cfg: VisualizationMarkersCfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/RayCaster") """The configuration object for the visualization markers. Defaults to RAY_CASTER_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab/isaaclab/sensors/sensor_base.py b/source/isaaclab/isaaclab/sensors/sensor_base.py index 4ece160bbe5..59c8581463b 100644 --- a/source/isaaclab/isaaclab/sensors/sensor_base.py +++ b/source/isaaclab/isaaclab/sensors/sensor_base.py @@ -294,7 +294,7 @@ def safe_callback(callback_name, event, obj_ref): def _initialize_callback(self, event): """Initializes the scene elements. - Note: + .. note:: PhysX handles are only enabled once the simulator starts playing. Hence, this function needs to be called whenever the simulator "plays" from a "stop" state. """ @@ -319,7 +319,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. """ if prim_path == "/": diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py index 02cb1281e7f..ae99b34445a 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object.py @@ -92,7 +92,7 @@ def num_bodies(self) -> int: def root_view(self) -> physx.SoftBodyView: """Deformable body view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_physx_view @@ -112,7 +112,7 @@ def material_physx_view(self) -> physx.SoftBodyMaterialView | None: This view is optional and may not be available if the material is not bound to the deformable body. If the material is not available, then the material properties will be set to default values. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._material_physx_view @@ -223,7 +223,7 @@ def write_nodal_kinematic_target_to_sim(self, targets: torch.Tensor, env_ids: Se and a flag indicating whether the node is kinematically driven or not. The positions are in the simulation frame. - Note: + .. note:: The flag is set to 0.0 for kinematically driven nodes and 1.0 for free nodes. 
Args: diff --git a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py index d917aef810c..280577baefb 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/deformable_object/deformable_object_cfg.py @@ -24,6 +24,6 @@ class DeformableObjectCfg(AssetBaseCfg): ) """The configuration object for the visualization markers. Defaults to DEFORMABLE_TARGET_MARKER_CFG. - Note: + .. note:: This attribute is only used when debug visualization is enabled. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index a344161378e..8ed484a1914 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -96,7 +96,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the asset. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_view @@ -142,7 +142,7 @@ def reset(self, env_ids: Sequence[int] | None = None): def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index e7f757fec30..29cd8263f3a 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -128,7 +128,7 @@ def body_names(self) -> list[str]: def root_view(self): """Root view for the rigid object collection. - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._root_view @@ -166,7 +166,7 @@ def reset(self, env_ids: torch.Tensor | None = None, object_ids: slice | torch.T def write_data_to_sim(self) -> None: """Write external wrench to the simulation. - Note: + .. note:: We write external wrench to the simulation here since this function is called before the simulation step. This ensures that the external wrench is applied at every simulation step. """ @@ -828,7 +828,7 @@ def _on_prim_deletion(self, prim_path: str) -> None: Args: prim_path: The path to the prim that is being deleted. - Note: + .. note:: This function is called when the prim is deleted. """ if prim_path == "/": diff --git a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py index fcf02950285..721d182817c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/surface_gripper/surface_gripper.py @@ -40,12 +40,12 @@ class SurfaceGripper(AssetBase): properties of the grippers at runtime. Finally, the :func:`set_grippers_command` function should be used to set the desired command for the grippers. - Note: + .. 
note:: The :func:`set_grippers_command` function does not write to the simulation. The simulation automatically calls :func:`write_data_to_sim` function to write the command to the simulation. Similarly, the update function is called automatically for every simulation step, and does not need to be called by the user. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ @@ -176,7 +176,7 @@ def update(self, dt: float) -> None: - "Closing" --> 0.0 - "Closed" --> 1.0 - Note: + .. note:: We need to do this conversion for every single step of the simulation because the gripper can lose contact with the object if some conditions are met: such as if a large force is applied to the gripped object. """ @@ -251,7 +251,7 @@ def _initialize_impl(self) -> None: ValueError: If the simulation backend is not CPU. RuntimeError: If the Simulation Context is not initialized or if gripper prims are not found. - Note: + .. note:: The SurfaceGripper is only supported on CPU for now. Please set the simulation backend to run on CPU. Use `--device cpu` to run the simulation on CPU. """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py index e5ebc6c8f84..632fff6de79 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -125,16 +125,16 @@ def body_names(self) -> list[str]: def body_physx_view(self) -> physx.RigidBodyView: """View for the rigid bodies captured (PhysX). - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._body_physx_view @property - def contact_physx_view(self) -> physx.RigidContactView: + def contact_view(self) -> physx.RigidContactView: """Contact reporter view for the bodies (PhysX). - Note: + .. note:: Use this view with caution. It requires handling of tensors in a specific way. """ return self._contact_physx_view diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py index 52d40ac1b09..c46bfd7e183 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -13,6 +13,11 @@ class ContactSensorData(BaseContactSensorData): """Data container for the PhysX contact reporting sensor.""" + @property + def pose_w(self) -> torch.Tensor | None: + """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + return torch.cat([self._pos_w, self._quat_w], dim=-1) + @property def pos_w(self) -> torch.Tensor | None: """Position of the sensor origin in world frame. Shape is (N, 3). 
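The combined pose accessors added in this patch (``pose_w``, ``source_pose_w``, ``target_pose_source``, ``target_pose_w``) simply concatenate the existing position and orientation buffers, so callers can read a single ``(..., 7)`` tensor instead of stitching the two together. A minimal usage sketch, assuming a frame transformer sensor instance named ``frame_tf`` that has already been updated during simulation:

.. code-block:: python

    # Each pose is laid out as [pos(3), quat_wxyz(4)] along the last dimension.
    target_poses = frame_tf.data.target_pose_w   # shape: (num_envs, num_frames, 7)
    positions = target_poses[..., :3]            # same values as frame_tf.data.target_pos_w
    orientations = target_poses[..., 3:]         # same values as frame_tf.data.target_quat_w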
diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py index 6892b033d67..90c31192276 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer.py @@ -101,7 +101,7 @@ def data(self) -> FrameTransformerData: def num_bodies(self) -> int: """Returns the number of target bodies being tracked. - Note: + .. note:: This is an alias used for consistency with other sensors. Otherwise, we recommend using :attr:`len(data.target_frame_names)` to access the number of target frames. """ @@ -111,7 +111,7 @@ def num_bodies(self) -> int: def body_names(self) -> list[str]: """Returns the names of the target bodies being tracked. - Note: + .. note:: This is an alias used for consistency with other sensors. Otherwise, we recommend using :attr:`data.target_frame_names` to access the target frame names. """ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py index 59c476cb204..b5f307aae0a 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/frame_transformer_data.py @@ -18,6 +18,11 @@ def target_frame_names(self) -> list[str]: """Target frame names (order matches data ordering).""" return self._target_frame_names + @property + def target_pose_source(self) -> torch.Tensor: + """Pose of target frame(s) relative to source frame. Shape is (N, M, 7). Quaternion in wxyz order.""" + return torch.cat([self._target_pos_source, self._target_quat_source], dim=-1) + @property def target_pos_source(self) -> torch.Tensor: """Position of target frame(s) relative to source frame. Shape is (N, M, 3).""" @@ -28,6 +33,11 @@ def target_quat_source(self) -> torch.Tensor: """Orientation of target frame(s) relative to source frame (w, x, y, z). Shape is (N, M, 4).""" return self._target_quat_source + @property + def target_pose_w(self) -> torch.Tensor: + """Pose of target frame(s) after offset in world frame. Shape is (N, M, 7). Quaternion in wxyz order.""" + return torch.cat([self._target_pos_w, self._target_quat_w], dim=-1) + @property def target_pos_w(self) -> torch.Tensor: """Position of target frame(s) after offset in world frame. Shape is (N, M, 3).""" @@ -38,6 +48,11 @@ def target_quat_w(self) -> torch.Tensor: """Orientation of target frame(s) after offset in world frame (w, x, y, z). Shape is (N, M, 4).""" return self._target_quat_w + @property + def source_pose_w(self) -> torch.Tensor: + """Pose of source frame after offset in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + return torch.cat([self._source_pos_w, self._source_quat_w], dim=-1) + @property def source_pos_w(self) -> torch.Tensor: """Position of source frame after offset in world frame. 
Shape is (N, 3).""" From aa984011f045073ed5f49900b3523a0a695ba913 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 13:36:44 +0100 Subject: [PATCH 21/38] Should be ready --- .../migration/migrating_to_isaaclab_3-0.rst | 32 +++++++++++++++++++ source/isaaclab/docs/CHANGELOG.rst | 3 ++ .../sensors/contact_sensor/__init__.py | 2 ++ .../base_contact_sensor_data.py | 2 ++ .../contact_sensor/contact_sensor_data.py | 18 +++++++++-- .../sensors/frame_transformer/__init__.py | 2 ++ .../frame_transformer_data.py | 18 +++++++++-- .../isaaclab/isaaclab/sensors/imu/__init__.py | 2 ++ .../isaaclab/sensors/imu/base_imu_data.py | 6 ++++ .../isaaclab/isaaclab/sensors/imu/imu_data.py | 18 +++++++++-- source/isaaclab_physx/docs/CHANGELOG.rst | 8 +++++ .../sensors/contact_sensor/__init__.py | 2 ++ .../sensors/contact_sensor/contact_sensor.py | 22 ++++++------- .../contact_sensor/contact_sensor_data.py | 15 +++++++++ .../sensors/frame_transformer/__init__.py | 2 ++ .../isaaclab_physx/sensors/imu/__init__.py | 2 ++ .../isaaclab_physx/sensors/imu/imu_data.py | 21 ++++++++++++ .../test/sensors/test_contact_sensor.py | 14 ++++---- 18 files changed, 162 insertions(+), 27 deletions(-) diff --git a/docs/source/migration/migrating_to_isaaclab_3-0.rst b/docs/source/migration/migrating_to_isaaclab_3-0.rst index 1024d9a6d3f..1a16cd3a06c 100644 --- a/docs/source/migration/migrating_to_isaaclab_3-0.rst +++ b/docs/source/migration/migrating_to_isaaclab_3-0.rst @@ -73,6 +73,38 @@ you can import from ``isaaclab_physx.sensors``: from isaaclab_physx.sensors import FrameTransformer, FrameTransformerData +Sensor Pose Properties Deprecation +---------------------------------- + +The ``pose_w``, ``pos_w``, and ``quat_w`` properties on :class:`~isaaclab.sensors.ContactSensorData` +and :class:`~isaaclab.sensors.ImuData` are deprecated and will be removed in a future release. + +If you need to track sensor poses in world frame, please use a dedicated sensor such as +:class:`~isaaclab.sensors.FrameTransformer` instead. + +**Before (deprecated):** + +.. code-block:: python + + # Using pose properties directly on sensor data + sensor_pos = contact_sensor.data.pos_w + sensor_quat = contact_sensor.data.quat_w + +**After (recommended):** + +.. code-block:: python + + # Use FrameTransformer to track sensor pose + frame_transformer = FrameTransformer(FrameTransformerCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="{ENV_REGEX_NS}/Robot/sensor_link"), + ], + )) + sensor_pos = frame_transformer.data.target_pos_w + sensor_quat = frame_transformer.data.target_quat_w + + ``root_physx_view`` Deprecation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index ba621e1c45e..7118fcc3e65 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -25,6 +25,9 @@ Changed :class:`~isaaclab.sensors.Imu`, :class:`~isaaclab.sensors.FrameTransformer`) to follow the multi-backend architecture. The classes now act as factory wrappers that instantiate the appropriate backend-specific implementation (PhysX by default). +* Refactored the sensor data classes (:class:`~isaaclab.sensors.ContactSensorData`, + :class:`~isaaclab.sensors.ImuData`, :class:`~isaaclab.sensors.FrameTransformerData`) to use the + factory pattern for backend-specific instantiation. 
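The factory refactor above is intended to be transparent to downstream code: configuration classes and the top-level sensor classes are imported exactly as before, and the data container attached to a sensor comes from the backend-specific implementation (PhysX by default). A rough sketch under that assumption; the prim path below is only illustrative:

.. code-block:: python

    from isaaclab.sensors import ContactSensorCfg

    # Existing configuration code is unchanged by the refactor; with the default
    # backend the resulting sensor and its data container are the PhysX classes.
    contact_cfg = ContactSensorCfg(
        prim_path="{ENV_REGEX_NS}/Robot/.*_foot",  # illustrative path
        track_pose=False,
    )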
* Moved PhysX-specific sensor tests to the ``isaaclab_physx`` package: * ``test_contact_sensor.py`` → ``isaaclab_physx/test/sensors/`` diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py index b43f57f2daa..f6c2cd33c8e 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py @@ -10,3 +10,5 @@ from .contact_sensor import ContactSensor from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData + +__all__ = ["BaseContactSensor", "BaseContactSensorData", "ContactSensor", "ContactSensorCfg", "ContactSensorData"] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py index 0675a01a703..cdeff3f72e7 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor_data.py @@ -73,6 +73,7 @@ def force_matrix_w_history(self) -> torch.Tensor | None: """ raise NotImplementedError + # Make issues for this in Newton P1/P2s @property @abstractmethod def contact_pos_w(self) -> torch.Tensor | None: @@ -82,6 +83,7 @@ def contact_pos_w(self) -> torch.Tensor | None: """ raise NotImplementedError + # Make issues for this in Newton P1/P2s @property @abstractmethod def friction_forces_w(self) -> torch.Tensor | None: diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py index 64fd537cb97..11fbcd8353f 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/contact_sensor_data.py @@ -5,9 +5,21 @@ """Re-exports the base contact sensor data class for backwards compatibility.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + from .base_contact_sensor_data import BaseContactSensorData -# Re-export for backwards compatibility -ContactSensorData = BaseContactSensorData +if TYPE_CHECKING: + from isaaclab_physx.sensors.contact_sensor import ContactSensorData as PhysXContactSensorData + + +class ContactSensorData(FactoryBase, BaseContactSensorData): + """Factory for creating contact sensor data instances.""" -__all__ = ["BaseContactSensorData", "ContactSensorData"] + def __new__(cls, *args, **kwargs) -> BaseContactSensorData | PhysXContactSensorData: + """Create a new instance of a contact sensor data based on the backend.""" + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py index 938f1501298..cb58a0c75e7 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py @@ -10,3 +10,5 @@ from .frame_transformer import FrameTransformer from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData + +__all__ = ["BaseFrameTransformer", "BaseFrameTransformerData", "FrameTransformer", "FrameTransformerCfg", "FrameTransformerData", "OffsetCfg"] \ No newline at end of file diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py 
b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index 2a9fa56516b..acae3915c89 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -5,9 +5,21 @@ """Re-exports the base frame transformer data class for backwards compatibility.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + from .base_frame_transformer_data import BaseFrameTransformerData -# Re-export for backwards compatibility -FrameTransformerData = BaseFrameTransformerData +if TYPE_CHECKING: + from isaaclab_physx.sensors.frame_transformer import FrameTransformerData as PhysXFrameTransformerData + + +class FrameTransformerData(FactoryBase, BaseFrameTransformerData): + """Factory for creating frame transformer data instances.""" -__all__ = ["BaseFrameTransformerData", "FrameTransformerData"] + def __new__(cls, *args, **kwargs) -> BaseFrameTransformerData | PhysXFrameTransformerData: + """Create a new instance of a frame transformer data based on the backend.""" + return super().__new__(cls, *args, **kwargs) \ No newline at end of file diff --git a/source/isaaclab/isaaclab/sensors/imu/__init__.py b/source/isaaclab/isaaclab/sensors/imu/__init__.py index a16340919b3..2f372fade3a 100644 --- a/source/isaaclab/isaaclab/sensors/imu/__init__.py +++ b/source/isaaclab/isaaclab/sensors/imu/__init__.py @@ -12,3 +12,5 @@ from .imu import Imu from .imu_cfg import ImuCfg from .imu_data import ImuData + +__all__ = ["BaseImu", "BaseImuData", "Imu", "ImuCfg", "ImuData"] diff --git a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py index 0c4204f910e..78078a3ac2b 100644 --- a/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/base_imu_data.py @@ -19,6 +19,12 @@ class BaseImuData(ABC): implementations should inherit from this class and provide the actual data storage. """ + @property + @abstractmethod + def pose_w(self) -> torch.Tensor: + """Pose of the sensor origin in world frame. Shape is (N, 7). 
Quaternion in wxyz order.""" + raise NotImplementedError + @property @abstractmethod def pos_w(self) -> torch.Tensor: diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index a7a2fed703d..81dfbcb1a79 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -5,9 +5,21 @@ """Re-exports the base IMU data class for backwards compatibility.""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from isaaclab.utils.backend_utils import FactoryBase + from .base_imu_data import BaseImuData -# Re-export for backwards compatibility -ImuData = BaseImuData +if TYPE_CHECKING: + from isaaclab_physx.sensors.imu import ImuData as PhysXImuData + + +class ImuData(FactoryBase, BaseImuData): + """Factory for creating IMU data instances.""" -__all__ = ["BaseImuData", "ImuData"] + def __new__(cls, *args, **kwargs) -> BaseImuData | PhysXImuData: + """Create a new instance of an IMU data based on the backend.""" + return super().__new__(cls, *args, **kwargs) \ No newline at end of file diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index 08f3f939a6e..d3d462d2f44 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -32,6 +32,14 @@ Added * ``check_contact_sensor.py`` * ``check_imu_sensor.py`` +Deprecated +^^^^^^^^^^ + +* Deprecated the ``pose_w``, ``pos_w``, and ``quat_w`` properties on + :class:`~isaaclab_physx.sensors.ContactSensorData` and :class:`~isaaclab_physx.sensors.ImuData`. + These properties will be removed in a future release. Please use a dedicated sensor (e.g., + :class:`~isaaclab.sensors.FrameTransformer`) to measure sensor poses in world frame. + 0.1.0 (2026-01-28) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py index 54dcd41658c..1f1a0a03427 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -7,3 +7,5 @@ from .contact_sensor import ContactSensor from .contact_sensor_data import ContactSensorData + +__all__ = ["ContactSensor", "ContactSensorData"] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py index 632fff6de79..2c9bf94528f 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -137,7 +137,7 @@ def contact_view(self) -> physx.RigidContactView: .. note:: Use this view with caution. It requires handling of tensors in a specific way. 
""" - return self._contact_physx_view + return self._contact_view """ Operations @@ -202,7 +202,7 @@ def _initialize_impl(self): # create a rigid prim view for the sensor self._body_physx_view = self._physics_sim_view.create_rigid_body_view(body_names_glob) - self._contact_physx_view = self._physics_sim_view.create_rigid_contact_view( + self._contact_view = self._physics_sim_view.create_rigid_contact_view( body_names_glob, filter_patterns=filter_prim_paths_glob, max_contact_data_count=self.cfg.max_contact_data_count_per_prim * len(body_names) * self._num_envs, @@ -232,7 +232,7 @@ def _initialize_impl(self): ) # prepare data buffers - num_filter_shapes = self.contact_physx_view.filter_count if len(self.cfg.filter_prim_paths_expr) != 0 else 0 + num_filter_shapes = self.contact_view.filter_count if len(self.cfg.filter_prim_paths_expr) != 0 else 0 self._data.create_buffers( num_envs=self._num_envs, num_bodies=self._num_bodies, @@ -254,7 +254,7 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # obtain the contact forces # TODO: We are handling the indexing ourself because of the shape; (N, B) vs expected (N * B). # This isn't the most efficient way to do this, but it's the easiest to implement. - net_forces_w = self.contact_physx_view.get_net_contact_forces(dt=self._sim_physics_dt) + net_forces_w = self.contact_view.get_net_contact_forces(dt=self._sim_physics_dt) self._data.net_forces_w[env_ids, :, :] = net_forces_w.view(-1, self._num_bodies, 3)[env_ids] # update contact force history if self.cfg.history_length > 0: @@ -264,9 +264,9 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # obtain the contact force matrix if len(self.cfg.filter_prim_paths_expr) != 0: # shape of the filtering matrix: (num_envs, num_bodies, num_filter_shapes, 3) - num_filters = self.contact_physx_view.filter_count + num_filters = self.contact_view.filter_count # acquire and shape the force matrix - force_matrix_w = self.contact_physx_view.get_contact_force_matrix(dt=self._sim_physics_dt) + force_matrix_w = self.contact_view.get_contact_force_matrix(dt=self._sim_physics_dt) force_matrix_w = force_matrix_w.view(-1, self._num_bodies, num_filters, 3) self._data.force_matrix_w[env_ids] = force_matrix_w[env_ids] if self.cfg.history_length > 0: @@ -282,7 +282,7 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # obtain contact points if self.cfg.track_contact_points: _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_physx_view.get_contact_data(dt=self._sim_physics_dt) + self.contact_view.get_contact_data(dt=self._sim_physics_dt) ) self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( buffer_contact_points, buffer_count, buffer_start_indices @@ -290,7 +290,7 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # obtain friction forces if self.cfg.track_friction_forces: - friction_forces, _, buffer_count, buffer_start_indices = self.contact_physx_view.get_friction_data( + friction_forces, _, buffer_count, buffer_start_indices = self.contact_view.get_friction_data( dt=self._sim_physics_dt ) self._data.friction_forces_w[env_ids] = self._unpack_contact_buffer_data( @@ -341,7 +341,7 @@ def _unpack_contact_buffer_data( This function vectorizes the following nested loop: for i in range(self._num_bodies * self._num_envs): - for j in range(self.contact_physx_view.filter_count): + for j in range(self.contact_view.filter_count): start_index_ij = buffer_start_indices[i, j] count_ij = buffer_count[i, j] self._contact_position_aggregate_buffer[i, j, :] = 
torch.mean( @@ -376,7 +376,7 @@ def _unpack_contact_buffer_data( agg[counts == 0] = default return agg.view(self._num_envs * self.num_bodies, -1, 3).view( - self._num_envs, self._num_bodies, self.contact_physx_view.filter_count, 3 + self._num_envs, self._num_bodies, self.contact_view.filter_count, 3 ) def _set_debug_vis_impl(self, debug_vis: bool): @@ -420,4 +420,4 @@ def _invalidate_initialize_callback(self, event): super()._invalidate_initialize_callback(event) # set all existing views to None to invalidate them self._body_physx_view = None - self._contact_physx_view = None + self._contact_view = None diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py index c46bfd7e183..caea42722b3 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -6,9 +6,12 @@ from __future__ import annotations import torch +import logging from isaaclab.sensors.contact_sensor import BaseContactSensorData +logger = logging.getLogger(__name__) + class ContactSensorData(BaseContactSensorData): """Data container for the PhysX contact reporting sensor.""" @@ -16,6 +19,10 @@ class ContactSensorData(BaseContactSensorData): @property def pose_w(self) -> torch.Tensor | None: """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + logger.warning( + "The `pose_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor poses in world frame." + ) return torch.cat([self._pos_w, self._quat_w], dim=-1) @property @@ -24,6 +31,10 @@ def pos_w(self) -> torch.Tensor | None: None if :attr:`ContactSensorCfg.track_pose` is False. """ + logger.warning( + "The `pos_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor positions in world frame." + ) return self._pos_w @property @@ -32,6 +43,10 @@ def quat_w(self) -> torch.Tensor | None: None if :attr:`ContactSensorCfg.track_pose` is False. """ + logger.warning( + "The `quat_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor orientations in world frame." 
+        ) return self._quat_w @property diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py index e2b43f5d8d3..02f209c6fd2 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -7,3 +7,5 @@ from .frame_transformer import FrameTransformer from .frame_transformer_data import FrameTransformerData + +__all__ = ["FrameTransformer", "FrameTransformerData"] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py index 474e88f9e29..5a1157f1a72 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py @@ -7,3 +7,5 @@ from .imu import Imu from .imu_data import ImuData + +__all__ = ["Imu", "ImuData"] \ No newline at end of file diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py index 18de813ac5f..ab44f21d2d2 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py @@ -9,18 +9,39 @@ from isaaclab.sensors.imu import BaseImuData +import logging + +logger = logging.getLogger(__name__) + class ImuData(BaseImuData): """Data container for the PhysX Imu sensor.""" + @property + def pose_w(self) -> torch.Tensor: + """Pose of the sensor origin in world frame. Shape is (N, 7). Quaternion in wxyz order.""" + logger.warning( + "The `pose_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor poses in world frame." + ) + return torch.cat((self._pos_w, self._quat_w), dim=-1) + @property def pos_w(self) -> torch.Tensor: """Position of the sensor origin in world frame. Shape is (N, 3).""" + logger.warning( + "The `pos_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor positions in world frame." + ) return self._pos_w @property def quat_w(self) -> torch.Tensor: """Orientation of the sensor origin in quaternion (w, x, y, z) in world frame. Shape is (N, 4).""" + logger.warning( + "The `quat_w` property will be deprecated in a future release. Please use a dedicated sensor to measure " + "sensor orientations in world frame." 
+ ) return self._quat_w @property diff --git a/source/isaaclab_physx/test/sensors/test_contact_sensor.py b/source/isaaclab_physx/test/sensors/test_contact_sensor.py index ed376f97f2d..468047201ef 100644 --- a/source/isaaclab_physx/test/sensors/test_contact_sensor.py +++ b/source/isaaclab_physx/test/sensors/test_contact_sensor.py @@ -291,8 +291,8 @@ def test_cube_stack_contact_filtering(setup_simulation, device, num_envs): contact_sensor_2 = scene["contact_sensor_2"] # Check that contact processing is enabled - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Play the simulation scene.reset() @@ -357,8 +357,8 @@ def test_no_contact_reporting(setup_simulation): contact_sensor_2: ContactSensor = scene["contact_sensor_2"] # Check buffers have the right size - assert contact_sensor.contact_physx_view.filter_count == 1 - assert contact_sensor_2.contact_physx_view.filter_count == 1 + assert contact_sensor.contact_view.filter_count == 1 + assert contact_sensor_2.contact_view.filter_count == 1 # Reset the contact sensors scene.reset() @@ -486,7 +486,7 @@ def test_friction_reporting(setup_simulation, grav_dir): _perform_sim_step(sim, scene, sim_dt) # check that forces are being reported match expected friction forces - expected_friction, _, _, _ = scene["contact_sensor"].contact_physx_view.get_friction_data(dt=sim_dt) + expected_friction, _, _, _ = scene["contact_sensor"].contact_view.get_friction_data(dt=sim_dt) reported_friction = scene["contact_sensor"].data.friction_forces_w[0, 0, :] torch.testing.assert_close(expected_friction.sum(dim=0), reported_friction[0], atol=1e-6, rtol=1e-5) @@ -752,11 +752,11 @@ def _test_friction_forces(shape: RigidObject, sensor: ContactSensor, mode: Conta # compare friction forces if mode == ContactTestMode.IN_CONTACT: assert torch.any(torch.abs(sensor._data.friction_forces_w) > 1e-5).item() - friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_physx_view.get_friction_data( + friction_forces, _, buffer_count, buffer_start_indices = sensor.contact_view.get_friction_data( dt=sensor._sim_physics_dt ) for i in range(sensor.num_instances * num_bodies): - for j in range(sensor.contact_physx_view.filter_count): + for j in range(sensor.contact_view.filter_count): start_index_ij = buffer_start_indices[i, j] count_ij = buffer_count[i, j] force = torch.sum(friction_forces[start_index_ij : (start_index_ij + count_ij), :], dim=0) From 2b06fc4f8cad275c2014d8c614ac9a581dfb0b52 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 13:37:12 +0100 Subject: [PATCH 22/38] pre-commits --- source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py | 2 +- .../isaaclab/sensors/contact_sensor/base_contact_sensor.py | 3 +-- .../isaaclab/isaaclab/sensors/frame_transformer/__init__.py | 2 +- .../sensors/frame_transformer/frame_transformer_data.py | 2 +- source/isaaclab/isaaclab/sensors/imu/imu_data.py | 2 +- .../isaaclab_physx/sensors/contact_sensor/__init__.py | 2 +- .../isaaclab_physx/sensors/contact_sensor/contact_sensor.py | 4 ++-- .../sensors/contact_sensor/contact_sensor_data.py | 3 ++- .../isaaclab_physx/sensors/frame_transformer/__init__.py | 2 +- source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py | 2 +- source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py | 4 ++-- 11 files changed, 14 insertions(+), 14 deletions(-) diff --git 
a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py index f6c2cd33c8e..5f2d3df68a0 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/__init__.py @@ -11,4 +11,4 @@ from .contact_sensor_cfg import ContactSensorCfg from .contact_sensor_data import ContactSensorData -__all__ = ["BaseContactSensor", "BaseContactSensorData", "ContactSensor", "ContactSensorCfg", "ContactSensorData"] \ No newline at end of file +__all__ = ["BaseContactSensor", "BaseContactSensorData", "ContactSensor", "ContactSensorCfg", "ContactSensorData"] diff --git a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py index a37339158d0..673eeb3b208 100644 --- a/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py +++ b/source/isaaclab/isaaclab/sensors/contact_sensor/base_contact_sensor.py @@ -88,13 +88,12 @@ def body_names(self) -> list[str]: @abstractmethod def contact_view(self): """View for the contact reporting. - + .. note:: None if there is no view associated with the sensor. """ raise NotImplementedError - """ Operations """ diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py index cb58a0c75e7..664a91e4eb0 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/__init__.py @@ -11,4 +11,4 @@ from .frame_transformer_cfg import FrameTransformerCfg, OffsetCfg from .frame_transformer_data import FrameTransformerData -__all__ = ["BaseFrameTransformer", "BaseFrameTransformerData", "FrameTransformer", "FrameTransformerCfg", "FrameTransformerData", "OffsetCfg"] \ No newline at end of file +__all__ = ["BaseFrameTransformer", "BaseFrameTransformerData", "FrameTransformer", "FrameTransformerCfg", "FrameTransformerData", "OffsetCfg"] diff --git a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py index acae3915c89..8f202e15f40 100644 --- a/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py +++ b/source/isaaclab/isaaclab/sensors/frame_transformer/frame_transformer_data.py @@ -22,4 +22,4 @@ class FrameTransformerData(FactoryBase, BaseFrameTransformerData): def __new__(cls, *args, **kwargs) -> BaseFrameTransformerData | PhysXFrameTransformerData: """Create a new instance of a frame transformer data based on the backend.""" - return super().__new__(cls, *args, **kwargs) \ No newline at end of file + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab/isaaclab/sensors/imu/imu_data.py b/source/isaaclab/isaaclab/sensors/imu/imu_data.py index 81dfbcb1a79..6f6ed268ad5 100644 --- a/source/isaaclab/isaaclab/sensors/imu/imu_data.py +++ b/source/isaaclab/isaaclab/sensors/imu/imu_data.py @@ -22,4 +22,4 @@ class ImuData(FactoryBase, BaseImuData): def __new__(cls, *args, **kwargs) -> BaseImuData | PhysXImuData: """Create a new instance of an IMU data based on the backend.""" - return super().__new__(cls, *args, **kwargs) \ No newline at end of file + return super().__new__(cls, *args, **kwargs) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py index 1f1a0a03427..e2f5f2b7772 
100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/__init__.py @@ -8,4 +8,4 @@ from .contact_sensor import ContactSensor from .contact_sensor_data import ContactSensorData -__all__ = ["ContactSensor", "ContactSensorData"] \ No newline at end of file +__all__ = ["ContactSensor", "ContactSensorData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py index 2c9bf94528f..a7776562047 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor.py @@ -281,8 +281,8 @@ def _update_buffers_impl(self, env_ids: Sequence[int]): # obtain contact points if self.cfg.track_contact_points: - _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = ( - self.contact_view.get_contact_data(dt=self._sim_physics_dt) + _, buffer_contact_points, _, _, buffer_count, buffer_start_indices = self.contact_view.get_contact_data( + dt=self._sim_physics_dt ) self._data.contact_pos_w[env_ids] = self._unpack_contact_buffer_data( buffer_contact_points, buffer_count, buffer_start_indices diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py index caea42722b3..ea2cf41ea51 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/contact_sensor/contact_sensor_data.py @@ -5,9 +5,10 @@ from __future__ import annotations -import torch import logging +import torch + from isaaclab.sensors.contact_sensor import BaseContactSensorData logger = logging.getLogger(__name__) diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py index 02f209c6fd2..dde1954a4ad 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/frame_transformer/__init__.py @@ -8,4 +8,4 @@ from .frame_transformer import FrameTransformer from .frame_transformer_data import FrameTransformerData -__all__ = ["FrameTransformer", "FrameTransformerData"] \ No newline at end of file +__all__ = ["FrameTransformer", "FrameTransformerData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py index 5a1157f1a72..d1c50358796 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/__init__.py @@ -8,4 +8,4 @@ from .imu import Imu from .imu_data import ImuData -__all__ = ["Imu", "ImuData"] \ No newline at end of file +__all__ = ["Imu", "ImuData"] diff --git a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py index ab44f21d2d2..acd4f25a6a3 100644 --- a/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py +++ b/source/isaaclab_physx/isaaclab_physx/sensors/imu/imu_data.py @@ -5,12 +5,12 @@ from __future__ import annotations +import logging + import torch from isaaclab.sensors.imu import BaseImuData -import logging - logger = logging.getLogger(__name__) From 51f2d060b346faac273175c418fca59353268dfd Mon Sep 17 00:00:00 
2001 From: Antoine Richard Date: Thu, 29 Jan 2026 15:37:46 +0100 Subject: [PATCH 23/38] Initial push for mock interfaces to allow for benchmarking before migrating to warp. --- .../isaaclab/test/mock_interfaces/__init__.py | 35 + .../test/mock_interfaces/assets/__init__.py | 17 + .../test/mock_interfaces/assets/factories.py | 184 ++ .../assets/mock_articulation.py | 1616 +++++++++++++++++ .../assets/mock_rigid_object.py | 760 ++++++++ .../assets/mock_rigid_object_collection.py | 578 ++++++ .../test/mock_interfaces/sensors/__init__.py | 16 + .../test/mock_interfaces/sensors/factories.py | 123 ++ .../sensors/mock_contact_sensor.py | 430 +++++ .../sensors/mock_frame_transformer.py | 291 +++ .../test/mock_interfaces/sensors/mock_imu.py | 228 +++ .../test/mock_interfaces/utils/__init__.py | 9 + .../mock_interfaces/utils/mock_generator.py | 351 ++++ .../test/mock_interfaces/utils/patching.py | 263 +++ .../test_mock_interfaces/test_mock_assets.py | 336 ++++ .../test_mock_data_properties.py | 842 +++++++++ .../test_mock_interfaces/test_mock_sensors.py | 309 ++++ .../isaaclab_physx/test/__init__.py | 6 + .../test/mock_interfaces/__init__.py | 32 + .../test/mock_interfaces/factories.py | 240 +++ .../test/mock_interfaces/utils/__init__.py | 26 + .../utils/mock_shared_metatype.py | 64 + .../test/mock_interfaces/utils/patching.py | 284 +++ .../test/mock_interfaces/views/__init__.py | 12 + .../views/mock_articulation_view.py | 794 ++++++++ .../views/mock_rigid_body_view.py | 307 ++++ .../views/mock_rigid_contact_view.py | 284 +++ .../test/test_mock_interfaces/__init__.py | 6 + .../test/test_mock_interfaces/conftest.py | 62 + .../test_mock_articulation_view.py | 335 ++++ .../test_mock_rigid_body_view.py | 220 +++ .../test_mock_rigid_contact_view.py | 201 ++ .../test_mock_interfaces/test_patching.py | 189 ++ 33 files changed, 9450 insertions(+) create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/__init__.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py create mode 100644 source/isaaclab/test/test_mock_interfaces/test_mock_assets.py create mode 100644 source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py create mode 100644 source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py create mode 100644 
source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py create mode 100644 source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/__init__.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/conftest.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py create mode 100644 source/isaaclab_physx/test/test_mock_interfaces/test_patching.py diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py new file mode 100644 index 00000000000..91a45c39692 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py @@ -0,0 +1,35 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock interfaces for IsaacLab sensors and assets. + +This module provides mock implementations of sensor and asset classes for unit testing +without requiring the full Isaac Sim simulation environment. + +Example usage: + + .. code-block:: python + + from isaaclab.test.mock_interfaces.sensors import MockContactSensor + from isaaclab.test.mock_interfaces.assets import MockArticulation + + # Create mock sensor + sensor = MockContactSensor(num_instances=4, num_bodies=4, device="cpu") + sensor.data.set_mock_data(net_forces_w=torch.randn(4, 4, 3)) + + # Create mock articulation + robot = MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + joint_names=["joint_" + str(i) for i in range(12)], + device="cpu" + ) + +""" + +from .assets import * +from .sensors import * +from .utils import * diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py new file mode 100644 index 00000000000..42a1b2f7705 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock asset interfaces for testing without Isaac Sim.""" + +from .mock_articulation import MockArticulation, MockArticulationData +from .mock_rigid_object import MockRigidObject, MockRigidObjectData +from .mock_rigid_object_collection import MockRigidObjectCollection, MockRigidObjectCollectionData +from .factories import ( + create_mock_articulation, + create_mock_humanoid, + create_mock_quadruped, + create_mock_rigid_object, + create_mock_rigid_object_collection, +) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py new file mode 100644 index 00000000000..a04cf7b699f --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py @@ -0,0 +1,184 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating pre-configured mock assets.""" + +from __future__ import annotations + +import torch + +from .mock_articulation import MockArticulation +from .mock_rigid_object import MockRigidObject +from .mock_rigid_object_collection import MockRigidObjectCollection + + +def create_mock_articulation( + num_instances: int = 1, + num_joints: int = 1, + num_bodies: int = 2, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", +) -> MockArticulation: + """Create a mock articulation with default configuration. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance. + """ + return MockArticulation( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=is_fixed_base, + device=device, + ) + + +def create_mock_quadruped( + num_instances: int = 1, + device: str = "cpu", +) -> MockArticulation: + """Create a mock quadruped robot articulation. + + Creates a quadruped with 12 joints (3 per leg) and 13 bodies. + + Args: + num_instances: Number of robot instances. + device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance for a quadruped. + """ + leg_names = ["FL", "FR", "RL", "RR"] + joint_suffixes = ["hip", "thigh", "calf"] + + joint_names = [f"{leg}_{suffix}" for leg in leg_names for suffix in joint_suffixes] + body_names = ["base"] + [f"{leg}_{part}" for leg in leg_names for part in ["hip", "thigh", "calf"]] + + robot = MockArticulation( + num_instances=num_instances, + num_joints=12, + num_bodies=13, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=False, + device=device, + ) + + # Set reasonable default joint limits for a quadruped + joint_pos_limits = torch.zeros(num_instances, 12, 2, device=device) + joint_pos_limits[..., 0] = -1.57 # Lower limit + joint_pos_limits[..., 1] = 1.57 # Upper limit + robot.data.set_joint_pos_limits(joint_pos_limits) + + return robot + + +def create_mock_humanoid( + num_instances: int = 1, + device: str = "cpu", +) -> MockArticulation: + """Create a mock humanoid robot articulation. + + Creates a humanoid with 21 joints and 22 bodies. + + Args: + num_instances: Number of robot instances. 
+ device: Device for tensor allocation. + + Returns: + Configured MockArticulation instance for a humanoid. + """ + # Simplified humanoid joint structure + joint_names = [ + # Torso + "torso_yaw", "torso_pitch", "torso_roll", + # Left arm + "L_shoulder_pitch", "L_shoulder_roll", "L_shoulder_yaw", "L_elbow", + # Right arm + "R_shoulder_pitch", "R_shoulder_roll", "R_shoulder_yaw", "R_elbow", + # Left leg + "L_hip_yaw", "L_hip_roll", "L_hip_pitch", "L_knee", "L_ankle_pitch", + # Right leg + "R_hip_yaw", "R_hip_roll", "R_hip_pitch", "R_knee", "R_ankle_pitch", + ] + + body_names = [ + "pelvis", "torso", + "L_upper_arm", "L_lower_arm", "L_hand", + "R_upper_arm", "R_lower_arm", "R_hand", + "L_thigh", "L_shin", "L_foot", + "R_thigh", "R_shin", "R_foot", + "head", + ] + + return MockArticulation( + num_instances=num_instances, + num_joints=21, + num_bodies=len(body_names), + joint_names=joint_names, + body_names=body_names, + is_fixed_base=False, + device=device, + ) + + +def create_mock_rigid_object( + num_instances: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", +) -> MockRigidObject: + """Create a mock rigid object with default configuration. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies. + device: Device for tensor allocation. + + Returns: + Configured MockRigidObject instance. + """ + return MockRigidObject( + num_instances=num_instances, + body_names=body_names, + device=device, + ) + + +def create_mock_rigid_object_collection( + num_instances: int = 1, + num_bodies: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", +) -> MockRigidObjectCollection: + """Create a mock rigid object collection with default configuration. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. + + Returns: + Configured MockRigidObjectCollection instance. + """ + return MockRigidObjectCollection( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=body_names, + device=device, + ) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py new file mode 100644 index 00000000000..bb45770019c --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py @@ -0,0 +1,1616 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock articulation asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockArticulationData: + """Mock data container for articulation asset. + + This class mimics the interface of BaseArticulationData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_joints: int, + num_bodies: int, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + fixed_tendon_names: list[str] | None = None, + spatial_tendon_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock articulation data. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. 
+ body_names: Names of bodies. + num_fixed_tendons: Number of fixed tendons. + num_spatial_tendons: Number of spatial tendons. + fixed_tendon_names: Names of fixed tendons. + spatial_tendon_names: Names of spatial tendons. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_joints = num_joints + self._num_bodies = num_bodies + self._num_fixed_tendons = num_fixed_tendons + self._num_spatial_tendons = num_spatial_tendons + self._device = device + + # Names + self.joint_names = joint_names or [f"joint_{i}" for i in range(num_joints)] + self.body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self.fixed_tendon_names = fixed_tendon_names or [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + self.spatial_tendon_names = spatial_tendon_names or [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + # -- Internal storage for mock data -- + # Default states + self._default_root_pose: torch.Tensor | None = None + self._default_root_vel: torch.Tensor | None = None + self._default_joint_pos: torch.Tensor | None = None + self._default_joint_vel: torch.Tensor | None = None + + # Joint commands + self._joint_pos_target: torch.Tensor | None = None + self._joint_vel_target: torch.Tensor | None = None + self._joint_effort_target: torch.Tensor | None = None + self._computed_torque: torch.Tensor | None = None + self._applied_torque: torch.Tensor | None = None + + # Joint properties + self._joint_stiffness: torch.Tensor | None = None + self._joint_damping: torch.Tensor | None = None + self._joint_armature: torch.Tensor | None = None + self._joint_friction_coeff: torch.Tensor | None = None + self._joint_dynamic_friction_coeff: torch.Tensor | None = None + self._joint_viscous_friction_coeff: torch.Tensor | None = None + self._joint_pos_limits: torch.Tensor | None = None + self._joint_vel_limits: torch.Tensor | None = None + self._joint_effort_limits: torch.Tensor | None = None + self._soft_joint_pos_limits: torch.Tensor | None = None + self._soft_joint_vel_limits: torch.Tensor | None = None + self._gear_ratio: torch.Tensor | None = None + + # Joint state + self._joint_pos: torch.Tensor | None = None + self._joint_vel: torch.Tensor | None = None + self._joint_acc: torch.Tensor | None = None + + # Root state (link frame) + self._root_link_pose_w: torch.Tensor | None = None + self._root_link_vel_w: torch.Tensor | None = None + + # Root state (CoM frame) + self._root_com_pose_w: torch.Tensor | None = None + self._root_com_vel_w: torch.Tensor | None = None + + # Body state (link frame) + self._body_link_pose_w: torch.Tensor | None = None + self._body_link_vel_w: torch.Tensor | None = None + + # Body state (CoM frame) + self._body_com_pose_w: torch.Tensor | None = None + self._body_com_vel_w: torch.Tensor | None = None + self._body_com_acc_w: torch.Tensor | None = None + self._body_com_pose_b: torch.Tensor | None = None + + # Body properties + self._body_mass: torch.Tensor | None = None + self._body_inertia: torch.Tensor | None = None + self._body_incoming_joint_wrench_b: torch.Tensor | None = None + + # Tendon properties (fixed) + self._fixed_tendon_stiffness: torch.Tensor | None = None + self._fixed_tendon_damping: torch.Tensor | None = None + self._fixed_tendon_limit_stiffness: torch.Tensor | None = None + self._fixed_tendon_rest_length: torch.Tensor | None = None + self._fixed_tendon_offset: torch.Tensor | None = None + self._fixed_tendon_pos_limits: torch.Tensor | None = None + + # Tendon properties (spatial) + 
self._spatial_tendon_stiffness: torch.Tensor | None = None + self._spatial_tendon_damping: torch.Tensor | None = None + self._spatial_tendon_limit_stiffness: torch.Tensor | None = None + self._spatial_tendon_offset: torch.Tensor | None = None + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Helper for identity quaternion -- + def _identity_quat(self, *shape: int) -> torch.Tensor: + """Create identity quaternion tensor (w, x, y, z) = (1, 0, 0, 0).""" + quat = torch.zeros(*shape, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + + # -- Default state properties -- + + @property + def default_root_pose(self) -> torch.Tensor: + """Default root pose [pos(3), quat_xyzw(4)]. Shape: (N, 7).""" + if self._default_root_pose is None: + pose = torch.zeros(self._num_instances, 7, device=self._device) + pose[:, 3:7] = self._identity_quat(self._num_instances) + return pose + return self._default_root_pose + + @property + def default_root_vel(self) -> torch.Tensor: + """Default root velocity [lin_vel(3), ang_vel(3)]. Shape: (N, 6).""" + if self._default_root_vel is None: + return torch.zeros(self._num_instances, 6, device=self._device) + return self._default_root_vel + + @property + def default_root_state(self) -> torch.Tensor: + """Default root state [pose(7), vel(6)]. Shape: (N, 13).""" + return torch.cat([self.default_root_pose, self.default_root_vel], dim=-1) + + @property + def default_joint_pos(self) -> torch.Tensor: + """Default joint positions. Shape: (N, num_joints).""" + if self._default_joint_pos is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._default_joint_pos + + @property + def default_joint_vel(self) -> torch.Tensor: + """Default joint velocities. Shape: (N, num_joints).""" + if self._default_joint_vel is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._default_joint_vel + + # -- Joint command properties -- + + @property + def joint_pos_target(self) -> torch.Tensor: + """Joint position targets. Shape: (N, num_joints).""" + if self._joint_pos_target is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_pos_target + + @property + def joint_vel_target(self) -> torch.Tensor: + """Joint velocity targets. Shape: (N, num_joints).""" + if self._joint_vel_target is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_vel_target + + @property + def joint_effort_target(self) -> torch.Tensor: + """Joint effort targets. Shape: (N, num_joints).""" + if self._joint_effort_target is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_effort_target + + @property + def computed_torque(self) -> torch.Tensor: + """Computed torques before clipping. Shape: (N, num_joints).""" + if self._computed_torque is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._computed_torque + + @property + def applied_torque(self) -> torch.Tensor: + """Applied torques after clipping. Shape: (N, num_joints).""" + if self._applied_torque is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._applied_torque + + # -- Joint properties -- + + @property + def joint_stiffness(self) -> torch.Tensor: + """Joint stiffness. 
Shape: (N, num_joints).""" + if self._joint_stiffness is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_stiffness + + @property + def joint_damping(self) -> torch.Tensor: + """Joint damping. Shape: (N, num_joints).""" + if self._joint_damping is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_damping + + @property + def joint_armature(self) -> torch.Tensor: + """Joint armature. Shape: (N, num_joints).""" + if self._joint_armature is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_armature + + @property + def joint_friction_coeff(self) -> torch.Tensor: + """Joint static friction coefficient. Shape: (N, num_joints).""" + if self._joint_friction_coeff is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_friction_coeff + + @property + def joint_dynamic_friction_coeff(self) -> torch.Tensor: + """Joint dynamic friction coefficient. Shape: (N, num_joints).""" + if self._joint_dynamic_friction_coeff is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_dynamic_friction_coeff + + @property + def joint_viscous_friction_coeff(self) -> torch.Tensor: + """Joint viscous friction coefficient. Shape: (N, num_joints).""" + if self._joint_viscous_friction_coeff is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_viscous_friction_coeff + + @property + def joint_pos_limits(self) -> torch.Tensor: + """Joint position limits [lower, upper]. Shape: (N, num_joints, 2).""" + if self._joint_pos_limits is None: + limits = torch.zeros(self._num_instances, self._num_joints, 2, device=self._device) + limits[..., 0] = -float("inf") + limits[..., 1] = float("inf") + return limits + return self._joint_pos_limits + + @property + def joint_vel_limits(self) -> torch.Tensor: + """Joint velocity limits. Shape: (N, num_joints).""" + if self._joint_vel_limits is None: + return torch.full((self._num_instances, self._num_joints), float("inf"), device=self._device) + return self._joint_vel_limits + + @property + def joint_effort_limits(self) -> torch.Tensor: + """Joint effort limits. Shape: (N, num_joints).""" + if self._joint_effort_limits is None: + return torch.full((self._num_instances, self._num_joints), float("inf"), device=self._device) + return self._joint_effort_limits + + @property + def soft_joint_pos_limits(self) -> torch.Tensor: + """Soft joint position limits. Shape: (N, num_joints, 2).""" + if self._soft_joint_pos_limits is None: + return self.joint_pos_limits.clone() + return self._soft_joint_pos_limits + + @property + def soft_joint_vel_limits(self) -> torch.Tensor: + """Soft joint velocity limits. Shape: (N, num_joints).""" + if self._soft_joint_vel_limits is None: + return self.joint_vel_limits.clone() + return self._soft_joint_vel_limits + + @property + def gear_ratio(self) -> torch.Tensor: + """Gear ratio. Shape: (N, num_joints).""" + if self._gear_ratio is None: + return torch.ones(self._num_instances, self._num_joints, device=self._device) + return self._gear_ratio + + # -- Joint state properties -- + + @property + def joint_pos(self) -> torch.Tensor: + """Joint positions. 
Shape: (N, num_joints).""" + if self._joint_pos is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_pos + + @property + def joint_vel(self) -> torch.Tensor: + """Joint velocities. Shape: (N, num_joints).""" + if self._joint_vel is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_vel + + @property + def joint_acc(self) -> torch.Tensor: + """Joint accelerations. Shape: (N, num_joints).""" + if self._joint_acc is None: + return torch.zeros(self._num_instances, self._num_joints, device=self._device) + return self._joint_acc + + # -- Root state properties (link frame) -- + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose in world frame. Shape: (N, 7).""" + if self._root_link_pose_w is None: + pose = torch.zeros(self._num_instances, 7, device=self._device) + pose[:, 3:7] = self._identity_quat(self._num_instances) + return pose + return self._root_link_pose_w + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity in world frame. Shape: (N, 6).""" + if self._root_link_vel_w is None: + return torch.zeros(self._num_instances, 6, device=self._device) + return self._root_link_vel_w + + @property + def root_link_state_w(self) -> torch.Tensor: + """Root link state in world frame. Shape: (N, 13).""" + return torch.cat([self.root_link_pose_w, self.root_link_vel_w], dim=-1) + + # Sliced properties + @property + def root_link_pos_w(self) -> torch.Tensor: + """Root link position in world frame. Shape: (N, 3).""" + return self.root_link_pose_w[:, :3] + + @property + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation in world frame. Shape: (N, 4).""" + return self.root_link_pose_w[:, 3:7] + + @property + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root link linear velocity in world frame. Shape: (N, 3).""" + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity in world frame. Shape: (N, 3).""" + return self.root_link_vel_w[:, 3:6] + + # -- Root state properties (CoM frame) -- + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root CoM pose in world frame. Shape: (N, 7).""" + if self._root_com_pose_w is None: + return self.root_link_pose_w.clone() + return self._root_com_pose_w + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root CoM velocity in world frame. Shape: (N, 6).""" + if self._root_com_vel_w is None: + return self.root_link_vel_w.clone() + return self._root_com_vel_w + + @property + def root_com_state_w(self) -> torch.Tensor: + """Root CoM state in world frame. Shape: (N, 13).""" + return torch.cat([self.root_com_pose_w, self.root_com_vel_w], dim=-1) + + @property + def root_state_w(self) -> torch.Tensor: + """Root state (link pose + CoM velocity). Shape: (N, 13).""" + return torch.cat([self.root_link_pose_w, self.root_com_vel_w], dim=-1) + + # Sliced properties + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root CoM position in world frame. Shape: (N, 3).""" + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root CoM orientation in world frame. Shape: (N, 4).""" + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root CoM linear velocity in world frame. 
Shape: (N, 3).""" + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root CoM angular velocity in world frame. Shape: (N, 3).""" + return self.root_com_vel_w[:, 3:6] + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link poses in world frame. Shape: (N, num_bodies, 7).""" + if self._body_link_pose_w is None: + pose = torch.zeros(self._num_instances, self._num_bodies, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, self._num_bodies) + return pose + return self._body_link_pose_w + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocities in world frame. Shape: (N, num_bodies, 6).""" + if self._body_link_vel_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._body_link_vel_w + + @property + def body_link_state_w(self) -> torch.Tensor: + """Body link states in world frame. Shape: (N, num_bodies, 13).""" + return torch.cat([self.body_link_pose_w, self.body_link_vel_w], dim=-1) + + # Sliced properties + @property + def body_link_pos_w(self) -> torch.Tensor: + """Body link positions in world frame. Shape: (N, num_bodies, 3).""" + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Body link orientations in world frame. Shape: (N, num_bodies, 4).""" + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Body link linear velocities in world frame. Shape: (N, num_bodies, 3).""" + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Body link angular velocities in world frame. Shape: (N, num_bodies, 3).""" + return self.body_link_vel_w[..., 3:6] + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body CoM poses in world frame. Shape: (N, num_bodies, 7).""" + if self._body_com_pose_w is None: + return self.body_link_pose_w.clone() + return self._body_com_pose_w + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body CoM velocities in world frame. Shape: (N, num_bodies, 6).""" + if self._body_com_vel_w is None: + return self.body_link_vel_w.clone() + return self._body_com_vel_w + + @property + def body_com_state_w(self) -> torch.Tensor: + """Body CoM states in world frame. Shape: (N, num_bodies, 13).""" + return torch.cat([self.body_com_pose_w, self.body_com_vel_w], dim=-1) + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Body CoM accelerations in world frame. Shape: (N, num_bodies, 6).""" + if self._body_com_acc_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._body_com_acc_w + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Body CoM poses in body frame. Shape: (N, 1, 7).""" + if self._body_com_pose_b is None: + pose = torch.zeros(self._num_instances, 1, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, 1) + return pose + return self._body_com_pose_b + + # Sliced properties + @property + def body_com_pos_w(self) -> torch.Tensor: + """Body CoM positions in world frame. Shape: (N, num_bodies, 3).""" + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Body CoM orientations in world frame. 
Shape: (N, num_bodies, 4).""" + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Body CoM linear velocities in world frame. Shape: (N, num_bodies, 3).""" + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Body CoM angular velocities in world frame. Shape: (N, num_bodies, 3).""" + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Body CoM linear accelerations in world frame. Shape: (N, num_bodies, 3).""" + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Body CoM angular accelerations in world frame. Shape: (N, num_bodies, 3).""" + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Body CoM positions in body frame. Shape: (N, num_bodies, 3).""" + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Body CoM orientations in body frame. Shape: (N, num_bodies, 4).""" + return self.body_com_pose_b[..., 3:7] + + # -- Body properties -- + + @property + def body_mass(self) -> torch.Tensor: + """Body masses. Shape: (N, num_bodies).""" + if self._body_mass is None: + return torch.ones(self._num_instances, self._num_bodies, device=self._device) + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Body inertias. Shape: (N, num_bodies, 3, 3).""" + if self._body_inertia is None: + inertia = torch.zeros(self._num_instances, self._num_bodies, 3, 3, device=self._device) + inertia[..., 0, 0] = 1.0 + inertia[..., 1, 1] = 1.0 + inertia[..., 2, 2] = 1.0 + return inertia + return self._body_inertia + + @property + def body_incoming_joint_wrench_b(self) -> torch.Tensor: + """Body incoming joint wrenches. Shape: (N, num_bodies, 6).""" + if self._body_incoming_joint_wrench_b is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._body_incoming_joint_wrench_b + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity projection on base. Shape: (N, 3).""" + # Default gravity pointing down + gravity = torch.zeros(self._num_instances, 3, device=self._device) + gravity[:, 2] = -1.0 + return gravity + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading in world frame. Shape: (N,).""" + return torch.zeros(self._num_instances, device=self._device) + + @property + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in body frame. Shape: (N, 3).""" + return self.root_link_lin_vel_w.clone() + + @property + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in body frame. Shape: (N, 3).""" + return self.root_link_ang_vel_w.clone() + + @property + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root CoM linear velocity in body frame. Shape: (N, 3).""" + return self.root_com_lin_vel_w.clone() + + @property + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root CoM angular velocity in body frame. Shape: (N, 3).""" + return self.root_com_ang_vel_w.clone() + + # -- Convenience aliases for root state (without _link_ or _com_ prefix) -- + + @property + def root_pos_w(self) -> torch.Tensor: + """Root position (alias for root_link_pos_w). Shape: (N, 3).""" + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Root orientation (alias for root_link_quat_w). 
Shape: (N, 4).""" + return self.root_link_quat_w + + @property + def root_pose_w(self) -> torch.Tensor: + """Root pose (alias for root_link_pose_w). Shape: (N, 7).""" + return self.root_link_pose_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Root velocity (alias for root_com_vel_w). Shape: (N, 6).""" + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity (alias for root_com_lin_vel_w). Shape: (N, 3).""" + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Root angular velocity (alias for root_com_ang_vel_w). Shape: (N, 3).""" + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Root linear velocity in body frame (alias for root_com_lin_vel_b). Shape: (N, 3).""" + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Root angular velocity in body frame (alias for root_com_ang_vel_b). Shape: (N, 3).""" + return self.root_com_ang_vel_b + + # -- Convenience aliases for body state (without _link_ or _com_ prefix) -- + + @property + def body_pos_w(self) -> torch.Tensor: + """Body positions (alias for body_link_pos_w). Shape: (N, num_bodies, 3).""" + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Body orientations (alias for body_link_quat_w). Shape: (N, num_bodies, 4).""" + return self.body_link_quat_w + + @property + def body_pose_w(self) -> torch.Tensor: + """Body poses (alias for body_link_pose_w). Shape: (N, num_bodies, 7).""" + return self.body_link_pose_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Body velocities (alias for body_com_vel_w). Shape: (N, num_bodies, 6).""" + return self.body_com_vel_w + + @property + def body_state_w(self) -> torch.Tensor: + """Body states (alias for body_com_state_w). Shape: (N, num_bodies, 13).""" + return self.body_com_state_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Body linear velocities (alias for body_com_lin_vel_w). Shape: (N, num_bodies, 3).""" + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Body angular velocities (alias for body_com_ang_vel_w). Shape: (N, num_bodies, 3).""" + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Body accelerations (alias for body_com_acc_w). Shape: (N, num_bodies, 6).""" + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Body linear accelerations (alias for body_com_lin_acc_w). Shape: (N, num_bodies, 3).""" + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Body angular accelerations (alias for body_com_ang_acc_w). Shape: (N, num_bodies, 3).""" + return self.body_com_ang_acc_w + + # -- CoM in body frame (root body only) -- + + @property + def com_pos_b(self) -> torch.Tensor: + """Root CoM position in body frame. Shape: (N, 3).""" + return self.body_com_pose_b[:, 0, :3] + + @property + def com_quat_b(self) -> torch.Tensor: + """Root CoM orientation in body frame. Shape: (N, 4).""" + return self.body_com_pose_b[:, 0, 3:7] + + # -- Joint property aliases -- + + @property + def joint_limits(self) -> torch.Tensor: + """Joint position limits (alias for joint_pos_limits). Shape: (N, num_joints, 2).""" + return self.joint_pos_limits + + @property + def joint_velocity_limits(self) -> torch.Tensor: + """Joint velocity limits (alias for joint_vel_limits). 
Shape: (N, num_joints).""" + return self.joint_vel_limits + + @property + def joint_friction(self) -> torch.Tensor: + """Joint friction (alias for joint_friction_coeff). Shape: (N, num_joints).""" + return self.joint_friction_coeff + + # -- Fixed tendon alias -- + + @property + def fixed_tendon_limit(self) -> torch.Tensor: + """Fixed tendon limit (alias for fixed_tendon_pos_limits). Shape: (N, num_fixed_tendons, 2).""" + return self.fixed_tendon_pos_limits + + # -- Fixed tendon properties -- + + @property + def fixed_tendon_stiffness(self) -> torch.Tensor: + """Fixed tendon stiffness. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._fixed_tendon_stiffness is None: + return torch.zeros(self._num_instances, self._num_fixed_tendons, device=self._device) + return self._fixed_tendon_stiffness + + @property + def fixed_tendon_damping(self) -> torch.Tensor: + """Fixed tendon damping. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._fixed_tendon_damping is None: + return torch.zeros(self._num_instances, self._num_fixed_tendons, device=self._device) + return self._fixed_tendon_damping + + @property + def fixed_tendon_limit_stiffness(self) -> torch.Tensor: + """Fixed tendon limit stiffness. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._fixed_tendon_limit_stiffness is None: + return torch.zeros(self._num_instances, self._num_fixed_tendons, device=self._device) + return self._fixed_tendon_limit_stiffness + + @property + def fixed_tendon_rest_length(self) -> torch.Tensor: + """Fixed tendon rest length. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._fixed_tendon_rest_length is None: + return torch.zeros(self._num_instances, self._num_fixed_tendons, device=self._device) + return self._fixed_tendon_rest_length + + @property + def fixed_tendon_offset(self) -> torch.Tensor: + """Fixed tendon offset. Shape: (N, num_fixed_tendons).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._fixed_tendon_offset is None: + return torch.zeros(self._num_instances, self._num_fixed_tendons, device=self._device) + return self._fixed_tendon_offset + + @property + def fixed_tendon_pos_limits(self) -> torch.Tensor: + """Fixed tendon position limits. Shape: (N, num_fixed_tendons, 2).""" + if self._num_fixed_tendons == 0: + return torch.zeros(self._num_instances, 0, 2, device=self._device) + if self._fixed_tendon_pos_limits is None: + limits = torch.zeros(self._num_instances, self._num_fixed_tendons, 2, device=self._device) + limits[..., 0] = -float("inf") + limits[..., 1] = float("inf") + return limits + return self._fixed_tendon_pos_limits + + # -- Spatial tendon properties -- + + @property + def spatial_tendon_stiffness(self) -> torch.Tensor: + """Spatial tendon stiffness. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._spatial_tendon_stiffness is None: + return torch.zeros(self._num_instances, self._num_spatial_tendons, device=self._device) + return self._spatial_tendon_stiffness + + @property + def spatial_tendon_damping(self) -> torch.Tensor: + """Spatial tendon damping. 
Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._spatial_tendon_damping is None: + return torch.zeros(self._num_instances, self._num_spatial_tendons, device=self._device) + return self._spatial_tendon_damping + + @property + def spatial_tendon_limit_stiffness(self) -> torch.Tensor: + """Spatial tendon limit stiffness. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._spatial_tendon_limit_stiffness is None: + return torch.zeros(self._num_instances, self._num_spatial_tendons, device=self._device) + return self._spatial_tendon_limit_stiffness + + @property + def spatial_tendon_offset(self) -> torch.Tensor: + """Spatial tendon offset. Shape: (N, num_spatial_tendons).""" + if self._num_spatial_tendons == 0: + return torch.zeros(self._num_instances, 0, device=self._device) + if self._spatial_tendon_offset is None: + return torch.zeros(self._num_instances, self._num_spatial_tendons, device=self._device) + return self._spatial_tendon_offset + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Setters -- + + def set_default_root_pose(self, value: torch.Tensor) -> None: + self._default_root_pose = value.to(self._device) + + def set_default_root_vel(self, value: torch.Tensor) -> None: + self._default_root_vel = value.to(self._device) + + def set_default_joint_pos(self, value: torch.Tensor) -> None: + self._default_joint_pos = value.to(self._device) + + def set_default_joint_vel(self, value: torch.Tensor) -> None: + self._default_joint_vel = value.to(self._device) + + def set_joint_pos_target(self, value: torch.Tensor) -> None: + self._joint_pos_target = value.to(self._device) + + def set_joint_vel_target(self, value: torch.Tensor) -> None: + self._joint_vel_target = value.to(self._device) + + def set_joint_effort_target(self, value: torch.Tensor) -> None: + self._joint_effort_target = value.to(self._device) + + def set_computed_torque(self, value: torch.Tensor) -> None: + self._computed_torque = value.to(self._device) + + def set_applied_torque(self, value: torch.Tensor) -> None: + self._applied_torque = value.to(self._device) + + def set_joint_stiffness(self, value: torch.Tensor) -> None: + self._joint_stiffness = value.to(self._device) + + def set_joint_damping(self, value: torch.Tensor) -> None: + self._joint_damping = value.to(self._device) + + def set_joint_armature(self, value: torch.Tensor) -> None: + self._joint_armature = value.to(self._device) + + def set_joint_friction_coeff(self, value: torch.Tensor) -> None: + self._joint_friction_coeff = value.to(self._device) + + def set_joint_pos_limits(self, value: torch.Tensor) -> None: + self._joint_pos_limits = value.to(self._device) + + def set_joint_vel_limits(self, value: torch.Tensor) -> None: + self._joint_vel_limits = value.to(self._device) + + def set_joint_effort_limits(self, value: torch.Tensor) -> None: + self._joint_effort_limits = value.to(self._device) + + def set_soft_joint_pos_limits(self, value: torch.Tensor) -> None: + self._soft_joint_pos_limits = value.to(self._device) + + def set_soft_joint_vel_limits(self, value: torch.Tensor) -> None: + self._soft_joint_vel_limits = value.to(self._device) + + def set_gear_ratio(self, value: torch.Tensor) -> None: + self._gear_ratio = value.to(self._device) + + def set_joint_pos(self, value: torch.Tensor) -> None: + 
self._joint_pos = value.to(self._device) + + def set_joint_vel(self, value: torch.Tensor) -> None: + self._joint_vel = value.to(self._device) + + def set_joint_acc(self, value: torch.Tensor) -> None: + self._joint_acc = value.to(self._device) + + def set_root_link_pose_w(self, value: torch.Tensor) -> None: + self._root_link_pose_w = value.to(self._device) + + def set_root_link_vel_w(self, value: torch.Tensor) -> None: + self._root_link_vel_w = value.to(self._device) + + def set_root_com_pose_w(self, value: torch.Tensor) -> None: + self._root_com_pose_w = value.to(self._device) + + def set_root_com_vel_w(self, value: torch.Tensor) -> None: + self._root_com_vel_w = value.to(self._device) + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = value.to(self._device) + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = value.to(self._device) + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = value.to(self._device) + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = value.to(self._device) + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = value.to(self._device) + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = value.to(self._device) + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = value.to(self._device) + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = value.to(self._device) + + def set_body_incoming_joint_wrench_b(self, value: torch.Tensor) -> None: + self._body_incoming_joint_wrench_b = value.to(self._device) + + def set_fixed_tendon_stiffness(self, value: torch.Tensor) -> None: + self._fixed_tendon_stiffness = value.to(self._device) + + def set_fixed_tendon_damping(self, value: torch.Tensor) -> None: + self._fixed_tendon_damping = value.to(self._device) + + def set_fixed_tendon_limit_stiffness(self, value: torch.Tensor) -> None: + self._fixed_tendon_limit_stiffness = value.to(self._device) + + def set_fixed_tendon_rest_length(self, value: torch.Tensor) -> None: + self._fixed_tendon_rest_length = value.to(self._device) + + def set_fixed_tendon_offset(self, value: torch.Tensor) -> None: + self._fixed_tendon_offset = value.to(self._device) + + def set_fixed_tendon_pos_limits(self, value: torch.Tensor) -> None: + self._fixed_tendon_pos_limits = value.to(self._device) + + def set_spatial_tendon_stiffness(self, value: torch.Tensor) -> None: + self._spatial_tendon_stiffness = value.to(self._device) + + def set_spatial_tendon_damping(self, value: torch.Tensor) -> None: + self._spatial_tendon_damping = value.to(self._device) + + def set_spatial_tendon_limit_stiffness(self, value: torch.Tensor) -> None: + self._spatial_tendon_limit_stiffness = value.to(self._device) + + def set_spatial_tendon_offset(self, value: torch.Tensor) -> None: + self._spatial_tendon_offset = value.to(self._device) + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data. + + Accepts any property name as a keyword argument with a tensor value. + """ + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockArticulation: + """Mock articulation asset for testing without Isaac Sim. + + This class mimics the interface of BaseArticulation for testing purposes. 
+ It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_joints: int, + num_bodies: int, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + num_fixed_tendons: int = 0, + num_spatial_tendons: int = 0, + fixed_tendon_names: list[str] | None = None, + spatial_tendon_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock articulation. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + num_fixed_tendons: Number of fixed tendons. + num_spatial_tendons: Number of spatial tendons. + fixed_tendon_names: Names of fixed tendons. + spatial_tendon_names: Names of spatial tendons. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_joints = num_joints + self._num_bodies = num_bodies + self._is_fixed_base = is_fixed_base + self._num_fixed_tendons = num_fixed_tendons + self._num_spatial_tendons = num_spatial_tendons + self._device = device + + self._joint_names = joint_names or [f"joint_{i}" for i in range(num_joints)] + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self._fixed_tendon_names = fixed_tendon_names or [f"fixed_tendon_{i}" for i in range(num_fixed_tendons)] + self._spatial_tendon_names = spatial_tendon_names or [f"spatial_tendon_{i}" for i in range(num_spatial_tendons)] + + self._data = MockArticulationData( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=self._joint_names, + body_names=self._body_names, + num_fixed_tendons=num_fixed_tendons, + num_spatial_tendons=num_spatial_tendons, + fixed_tendon_names=self._fixed_tendon_names, + spatial_tendon_names=self._spatial_tendon_names, + device=device, + ) + + # Actuators (empty dict for mock) + self.actuators: dict = {} + + # -- Properties -- + + @property + def data(self) -> MockArticulationData: + """Data container for the articulation.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of articulation instances.""" + return self._num_instances + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def is_fixed_base(self) -> bool: + """Whether the articulation has a fixed base.""" + return self._is_fixed_base + + @property + def num_joints(self) -> int: + """Number of joints.""" + return self._num_joints + + @property + def num_bodies(self) -> int: + """Number of bodies.""" + return self._num_bodies + + @property + def num_fixed_tendons(self) -> int: + """Number of fixed tendons.""" + return self._num_fixed_tendons + + @property + def num_spatial_tendons(self) -> int: + """Number of spatial tendons.""" + return self._num_spatial_tendons + + @property + def joint_names(self) -> list[str]: + """Ordered joint names.""" + return self._joint_names + + @property + def body_names(self) -> list[str]: + """Ordered body names.""" + return self._body_names + + @property + def fixed_tendon_names(self) -> list[str]: + """Ordered fixed tendon names.""" + return self._fixed_tendon_names + + @property + def spatial_tendon_names(self) -> list[str]: + """Ordered spatial tendon names.""" + return self._spatial_tendon_names + + @property + def root_view(self) -> None: + """Returns None (no 
physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + # -- Core methods -- + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset articulation state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update articulation data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns.""" + return self._find_by_regex(self._body_names, name_keys, preserve_order) + + def find_joints( + self, + name_keys: str | Sequence[str], + joint_subset: list[int] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find joints by name regex patterns.""" + names = self._joint_names + if joint_subset is not None: + names = [names[i] for i in joint_subset] + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + if joint_subset is not None: + indices = [joint_subset[i] for i in indices] + return indices, matched_names + + def find_fixed_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[int] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find fixed tendons by name regex patterns.""" + names = self._fixed_tendon_names + if tendon_subsets is not None: + names = [names[i] for i in tendon_subsets] + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + if tendon_subsets is not None: + indices = [tendon_subsets[i] for i in indices] + return indices, matched_names + + def find_spatial_tendons( + self, + name_keys: str | Sequence[str], + tendon_subsets: list[int] | None = None, + preserve_order: bool = False, + ) -> tuple[list[int], list[str]]: + """Find spatial tendons by name regex patterns.""" + names = self._spatial_tendon_names + if tendon_subsets is not None: + names = [names[i] for i in tendon_subsets] + indices, matched_names = self._find_by_regex(names, name_keys, preserve_order) + if tendon_subsets is not None: + indices = [tendon_subsets[i] for i in indices] + return indices, matched_names + + def _find_by_regex( + self, names: list[str], name_keys: str | Sequence[str], preserve_order: bool + ) -> tuple[list[int], list[str]]: + """Find items by regex patterns.""" + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + # -- State writer methods (no-op for mock) -- + + def write_root_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root state to simulation.""" + pass + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor, + 
env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM state to simulation.""" + pass + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link state to simulation.""" + pass + + def write_root_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root pose to simulation.""" + pass + + def write_root_link_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link pose to simulation.""" + pass + + def write_root_com_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM pose to simulation.""" + pass + + def write_root_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root velocity to simulation.""" + pass + + def write_root_com_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM velocity to simulation.""" + pass + + def write_root_link_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link velocity to simulation.""" + pass + + def write_joint_state_to_sim( + self, + position: torch.Tensor, + velocity: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint state to simulation.""" + pass + + def write_joint_position_to_sim( + self, + position: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint positions to simulation.""" + pass + + def write_joint_velocity_to_sim( + self, + velocity: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint velocities to simulation.""" + pass + + def write_joint_stiffness_to_sim( + self, + stiffness: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint stiffness to simulation.""" + pass + + def write_joint_damping_to_sim( + self, + damping: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint damping to simulation.""" + pass + + def write_joint_position_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + warn_limit_violation: bool = True, + ) -> None: + """Write joint position limits to simulation.""" + pass + + def write_joint_velocity_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint velocity limits to simulation.""" + pass + + def write_joint_effort_limit_to_sim( + self, + limits: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint effort limits to simulation.""" + pass + + def write_joint_armature_to_sim( + self, + armature: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint armature to simulation.""" + pass + + def 
write_joint_friction_coefficient_to_sim( + self, + coeff: torch.Tensor | float, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint friction coefficient to simulation.""" + pass + + def write_joint_friction_to_sim( + self, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint friction to simulation.""" + pass + + def write_joint_limits_to_sim( + self, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write joint limits to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = True, + ) -> None: + """Set external forces and torques.""" + pass + + def set_joint_position_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint position targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_pos_target is None: + self._data._joint_pos_target = torch.zeros( + self._num_instances, self._num_joints, device=self._device + ) + self._data._joint_pos_target[env_ids, joint_ids] = target.to(self._device) + + def set_joint_velocity_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint velocity targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_vel_target is None: + self._data._joint_vel_target = torch.zeros( + self._num_instances, self._num_joints, device=self._device + ) + self._data._joint_vel_target[env_ids, joint_ids] = target.to(self._device) + + def set_joint_effort_target( + self, + target: torch.Tensor, + joint_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set joint effort targets.""" + if env_ids is None: + env_ids = slice(None) + if joint_ids is None: + joint_ids = slice(None) + if self._data._joint_effort_target is None: + self._data._joint_effort_target = torch.zeros( + self._num_instances, self._num_joints, device=self._device + ) + self._data._joint_effort_target[env_ids, joint_ids] = target.to(self._device) + + # -- Tendon methods (fixed) -- + + def set_fixed_tendon_stiffness( + self, + stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon stiffness.""" + pass + + def set_fixed_tendon_damping( + self, + damping: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | 
None = None, + ) -> None: + """Set fixed tendon damping.""" + pass + + def set_fixed_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit stiffness.""" + pass + + def set_fixed_tendon_position_limit( + self, + limit: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon position limit.""" + pass + + def set_fixed_tendon_limit( + self, + limit: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon limit (alias for set_fixed_tendon_position_limit).""" + pass + + def set_fixed_tendon_rest_length( + self, + rest_length: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon rest length.""" + pass + + def set_fixed_tendon_offset( + self, + offset: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set fixed tendon offset.""" + pass + + def write_fixed_tendon_properties_to_sim( + self, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write fixed tendon properties to simulation.""" + pass + + # -- Tendon methods (spatial) -- + + def set_spatial_tendon_stiffness( + self, + stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon stiffness.""" + pass + + def set_spatial_tendon_damping( + self, + damping: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon damping.""" + pass + + def set_spatial_tendon_limit_stiffness( + self, + limit_stiffness: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon limit stiffness.""" + pass + + def set_spatial_tendon_offset( + self, + offset: torch.Tensor, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set spatial tendon offset.""" + pass + + def write_spatial_tendon_properties_to_sim( + self, + tendon_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write spatial tendon properties to simulation.""" + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py new file mode 100644 index 00000000000..7c901996a33 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py @@ -0,0 +1,760 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock rigid object asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockRigidObjectData: + """Mock data container for rigid object asset. + + This class mimics the interface of BaseRigidObjectData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. 
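+
+    A minimal usage sketch (values below are illustrative; all names used are
+    properties and setters defined in this class)::
+
+        data = MockRigidObjectData(num_instances=4, device="cpu")
+        data.root_state_w.shape                      # torch.Size([4, 13]); defaults are zeros with identity quaternions
+        data.set_mock_data(root_com_vel_w=torch.ones(4, 6))
+        data.root_lin_vel_w                          # now ones, aliased to the CoM linear velocity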
+ """ + + def __init__( + self, + num_instances: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object data. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies (single body for rigid object). + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._num_bodies = 1 # Rigid object always has 1 body + self._device = device + + self.body_names = body_names or ["body"] + + # Default states + self._default_root_pose: torch.Tensor | None = None + self._default_root_vel: torch.Tensor | None = None + + # Root state (link frame) + self._root_link_pose_w: torch.Tensor | None = None + self._root_link_vel_w: torch.Tensor | None = None + + # Root state (CoM frame) + self._root_com_pose_w: torch.Tensor | None = None + self._root_com_vel_w: torch.Tensor | None = None + + # Body state (link frame) + self._body_link_pose_w: torch.Tensor | None = None + self._body_link_vel_w: torch.Tensor | None = None + + # Body state (CoM frame) + self._body_com_pose_w: torch.Tensor | None = None + self._body_com_vel_w: torch.Tensor | None = None + self._body_com_acc_w: torch.Tensor | None = None + self._body_com_pose_b: torch.Tensor | None = None + + # Body properties + self._body_mass: torch.Tensor | None = None + self._body_inertia: torch.Tensor | None = None + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + def _identity_quat(self, *shape: int) -> torch.Tensor: + """Create identity quaternion tensor (w, x, y, z) = (1, 0, 0, 0).""" + quat = torch.zeros(*shape, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + + # -- Default state properties -- + + @property + def default_root_pose(self) -> torch.Tensor: + """Default root pose. Shape: (N, 7).""" + if self._default_root_pose is None: + pose = torch.zeros(self._num_instances, 7, device=self._device) + pose[:, 3:7] = self._identity_quat(self._num_instances) + return pose + return self._default_root_pose + + @property + def default_root_vel(self) -> torch.Tensor: + """Default root velocity. Shape: (N, 6).""" + if self._default_root_vel is None: + return torch.zeros(self._num_instances, 6, device=self._device) + return self._default_root_vel + + @property + def default_root_state(self) -> torch.Tensor: + """Default root state. Shape: (N, 13).""" + return torch.cat([self.default_root_pose, self.default_root_vel], dim=-1) + + # -- Root state properties (link frame) -- + + @property + def root_link_pose_w(self) -> torch.Tensor: + """Root link pose in world frame. Shape: (N, 7).""" + if self._root_link_pose_w is None: + pose = torch.zeros(self._num_instances, 7, device=self._device) + pose[:, 3:7] = self._identity_quat(self._num_instances) + return pose + return self._root_link_pose_w + + @property + def root_link_vel_w(self) -> torch.Tensor: + """Root link velocity in world frame. Shape: (N, 6).""" + if self._root_link_vel_w is None: + return torch.zeros(self._num_instances, 6, device=self._device) + return self._root_link_vel_w + + @property + def root_link_state_w(self) -> torch.Tensor: + """Root link state in world frame. Shape: (N, 13).""" + return torch.cat([self.root_link_pose_w, self.root_link_vel_w], dim=-1) + + @property + def root_link_pos_w(self) -> torch.Tensor: + """Root link position. Shape: (N, 3).""" + return self.root_link_pose_w[:, :3] + + @property + def root_link_quat_w(self) -> torch.Tensor: + """Root link orientation. 
Shape: (N, 4).""" + return self.root_link_pose_w[:, 3:7] + + @property + def root_link_lin_vel_w(self) -> torch.Tensor: + """Root link linear velocity. Shape: (N, 3).""" + return self.root_link_vel_w[:, :3] + + @property + def root_link_ang_vel_w(self) -> torch.Tensor: + """Root link angular velocity. Shape: (N, 3).""" + return self.root_link_vel_w[:, 3:6] + + # -- Root state properties (CoM frame) -- + + @property + def root_com_pose_w(self) -> torch.Tensor: + """Root CoM pose in world frame. Shape: (N, 7).""" + if self._root_com_pose_w is None: + return self.root_link_pose_w.clone() + return self._root_com_pose_w + + @property + def root_com_vel_w(self) -> torch.Tensor: + """Root CoM velocity in world frame. Shape: (N, 6).""" + if self._root_com_vel_w is None: + return self.root_link_vel_w.clone() + return self._root_com_vel_w + + @property + def root_com_state_w(self) -> torch.Tensor: + """Root CoM state in world frame. Shape: (N, 13).""" + return torch.cat([self.root_com_pose_w, self.root_com_vel_w], dim=-1) + + @property + def root_state_w(self) -> torch.Tensor: + """Root state (link pose + CoM velocity). Shape: (N, 13).""" + return torch.cat([self.root_link_pose_w, self.root_com_vel_w], dim=-1) + + @property + def root_com_pos_w(self) -> torch.Tensor: + """Root CoM position. Shape: (N, 3).""" + return self.root_com_pose_w[:, :3] + + @property + def root_com_quat_w(self) -> torch.Tensor: + """Root CoM orientation. Shape: (N, 4).""" + return self.root_com_pose_w[:, 3:7] + + @property + def root_com_lin_vel_w(self) -> torch.Tensor: + """Root CoM linear velocity. Shape: (N, 3).""" + return self.root_com_vel_w[:, :3] + + @property + def root_com_ang_vel_w(self) -> torch.Tensor: + """Root CoM angular velocity. Shape: (N, 3).""" + return self.root_com_vel_w[:, 3:6] + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link pose in world frame. Shape: (N, 1, 7).""" + if self._body_link_pose_w is None: + pose = torch.zeros(self._num_instances, 1, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, 1) + return pose + return self._body_link_pose_w + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocity in world frame. Shape: (N, 1, 6).""" + if self._body_link_vel_w is None: + return torch.zeros(self._num_instances, 1, 6, device=self._device) + return self._body_link_vel_w + + @property + def body_link_state_w(self) -> torch.Tensor: + """Body link state in world frame. Shape: (N, 1, 13).""" + return torch.cat([self.body_link_pose_w, self.body_link_vel_w], dim=-1) + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Body link position. Shape: (N, 1, 3).""" + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Body link orientation. Shape: (N, 1, 4).""" + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Body link linear velocity. Shape: (N, 1, 3).""" + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Body link angular velocity. Shape: (N, 1, 3).""" + return self.body_link_vel_w[..., 3:6] + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body CoM pose in world frame. 
Shape: (N, 1, 7).""" + if self._body_com_pose_w is None: + return self.body_link_pose_w.clone() + return self._body_com_pose_w + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body CoM velocity in world frame. Shape: (N, 1, 6).""" + if self._body_com_vel_w is None: + return self.body_link_vel_w.clone() + return self._body_com_vel_w + + @property + def body_com_state_w(self) -> torch.Tensor: + """Body CoM state in world frame. Shape: (N, 1, 13).""" + return torch.cat([self.body_com_pose_w, self.body_com_vel_w], dim=-1) + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Body CoM acceleration in world frame. Shape: (N, 1, 6).""" + if self._body_com_acc_w is None: + return torch.zeros(self._num_instances, 1, 6, device=self._device) + return self._body_com_acc_w + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Body CoM pose in body frame. Shape: (N, 1, 7).""" + if self._body_com_pose_b is None: + pose = torch.zeros(self._num_instances, 1, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, 1) + return pose + return self._body_com_pose_b + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Body CoM position. Shape: (N, 1, 3).""" + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Body CoM orientation. Shape: (N, 1, 4).""" + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Body CoM linear velocity. Shape: (N, 1, 3).""" + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Body CoM angular velocity. Shape: (N, 1, 3).""" + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Body CoM linear acceleration. Shape: (N, 1, 3).""" + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Body CoM angular acceleration. Shape: (N, 1, 3).""" + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Body CoM position in body frame. Shape: (N, 1, 3).""" + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Body CoM orientation in body frame. Shape: (N, 1, 4).""" + return self.body_com_pose_b[..., 3:7] + + # -- Body properties -- + + @property + def body_mass(self) -> torch.Tensor: + """Body mass. Shape: (N, 1, 1).""" + if self._body_mass is None: + return torch.ones(self._num_instances, 1, 1, device=self._device) + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Body inertia (flattened 3x3). Shape: (N, 1, 9).""" + if self._body_inertia is None: + inertia = torch.zeros(self._num_instances, 1, 9, device=self._device) + inertia[..., 0] = 1.0 # Ixx + inertia[..., 4] = 1.0 # Iyy + inertia[..., 8] = 1.0 # Izz + return inertia + return self._body_inertia + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity projection on body. Shape: (N, 3).""" + gravity = torch.zeros(self._num_instances, 3, device=self._device) + gravity[:, 2] = -1.0 + return gravity + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading in world frame. Shape: (N,).""" + return torch.zeros(self._num_instances, device=self._device) + + @property + def root_link_lin_vel_b(self) -> torch.Tensor: + """Root link linear velocity in body frame. 
Shape: (N, 3).""" + return self.root_link_lin_vel_w.clone() + + @property + def root_link_ang_vel_b(self) -> torch.Tensor: + """Root link angular velocity in body frame. Shape: (N, 3).""" + return self.root_link_ang_vel_w.clone() + + @property + def root_com_lin_vel_b(self) -> torch.Tensor: + """Root CoM linear velocity in body frame. Shape: (N, 3).""" + return self.root_com_lin_vel_w.clone() + + @property + def root_com_ang_vel_b(self) -> torch.Tensor: + """Root CoM angular velocity in body frame. Shape: (N, 3).""" + return self.root_com_ang_vel_w.clone() + + # -- Convenience aliases for root state (without _link_ or _com_ prefix) -- + + @property + def root_pos_w(self) -> torch.Tensor: + """Root position (alias for root_link_pos_w). Shape: (N, 3).""" + return self.root_link_pos_w + + @property + def root_quat_w(self) -> torch.Tensor: + """Root orientation (alias for root_link_quat_w). Shape: (N, 4).""" + return self.root_link_quat_w + + @property + def root_pose_w(self) -> torch.Tensor: + """Root pose (alias for root_link_pose_w). Shape: (N, 7).""" + return self.root_link_pose_w + + @property + def root_vel_w(self) -> torch.Tensor: + """Root velocity (alias for root_com_vel_w). Shape: (N, 6).""" + return self.root_com_vel_w + + @property + def root_lin_vel_w(self) -> torch.Tensor: + """Root linear velocity (alias for root_com_lin_vel_w). Shape: (N, 3).""" + return self.root_com_lin_vel_w + + @property + def root_ang_vel_w(self) -> torch.Tensor: + """Root angular velocity (alias for root_com_ang_vel_w). Shape: (N, 3).""" + return self.root_com_ang_vel_w + + @property + def root_lin_vel_b(self) -> torch.Tensor: + """Root linear velocity in body frame (alias for root_com_lin_vel_b). Shape: (N, 3).""" + return self.root_com_lin_vel_b + + @property + def root_ang_vel_b(self) -> torch.Tensor: + """Root angular velocity in body frame (alias for root_com_ang_vel_b). Shape: (N, 3).""" + return self.root_com_ang_vel_b + + # -- Convenience aliases for body state (without _link_ or _com_ prefix) -- + + @property + def body_pos_w(self) -> torch.Tensor: + """Body positions (alias for body_link_pos_w). Shape: (N, 1, 3).""" + return self.body_link_pos_w + + @property + def body_quat_w(self) -> torch.Tensor: + """Body orientations (alias for body_link_quat_w). Shape: (N, 1, 4).""" + return self.body_link_quat_w + + @property + def body_pose_w(self) -> torch.Tensor: + """Body poses (alias for body_link_pose_w). Shape: (N, 1, 7).""" + return self.body_link_pose_w + + @property + def body_vel_w(self) -> torch.Tensor: + """Body velocities (alias for body_com_vel_w). Shape: (N, 1, 6).""" + return self.body_com_vel_w + + @property + def body_state_w(self) -> torch.Tensor: + """Body states (alias for body_com_state_w). Shape: (N, 1, 13).""" + return self.body_com_state_w + + @property + def body_lin_vel_w(self) -> torch.Tensor: + """Body linear velocities (alias for body_com_lin_vel_w). Shape: (N, 1, 3).""" + return self.body_com_lin_vel_w + + @property + def body_ang_vel_w(self) -> torch.Tensor: + """Body angular velocities (alias for body_com_ang_vel_w). Shape: (N, 1, 3).""" + return self.body_com_ang_vel_w + + @property + def body_acc_w(self) -> torch.Tensor: + """Body accelerations (alias for body_com_acc_w). Shape: (N, 1, 6).""" + return self.body_com_acc_w + + @property + def body_lin_acc_w(self) -> torch.Tensor: + """Body linear accelerations (alias for body_com_lin_acc_w). 
Shape: (N, 1, 3).""" + return self.body_com_lin_acc_w + + @property + def body_ang_acc_w(self) -> torch.Tensor: + """Body angular accelerations (alias for body_com_ang_acc_w). Shape: (N, 1, 3).""" + return self.body_com_ang_acc_w + + # -- CoM in body frame -- + + @property + def com_pos_b(self) -> torch.Tensor: + """CoM position in body frame. Shape: (N, 3).""" + return self.body_com_pose_b[:, 0, :3] + + @property + def com_quat_b(self) -> torch.Tensor: + """CoM orientation in body frame. Shape: (N, 4).""" + return self.body_com_pose_b[:, 0, 3:7] + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Setters -- + + def set_default_root_pose(self, value: torch.Tensor) -> None: + self._default_root_pose = value.to(self._device) + + def set_default_root_vel(self, value: torch.Tensor) -> None: + self._default_root_vel = value.to(self._device) + + def set_root_link_pose_w(self, value: torch.Tensor) -> None: + self._root_link_pose_w = value.to(self._device) + + def set_root_link_vel_w(self, value: torch.Tensor) -> None: + self._root_link_vel_w = value.to(self._device) + + def set_root_com_pose_w(self, value: torch.Tensor) -> None: + self._root_com_pose_w = value.to(self._device) + + def set_root_com_vel_w(self, value: torch.Tensor) -> None: + self._root_com_vel_w = value.to(self._device) + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = value.to(self._device) + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = value.to(self._device) + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = value.to(self._device) + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = value.to(self._device) + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = value.to(self._device) + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = value.to(self._device) + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = value.to(self._device) + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = value.to(self._device) + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data.""" + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockRigidObject: + """Mock rigid object asset for testing without Isaac Sim. + + This class mimics the interface of BaseRigidObject for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object. + + Args: + num_instances: Number of rigid object instances. + body_names: Names of bodies (single body for rigid object). + device: Device for tensor allocation. 
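+
+        Example:
+            A minimal usage sketch (the body name and counts are arbitrary):
+
+            >>> obj = MockRigidObject(num_instances=4, body_names=["cube"])
+            >>> obj.num_bodies
+            1
+            >>> obj.data.root_pose_w.shape  # identity pose by default
+            torch.Size([4, 7])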
+ """ + self._num_instances = num_instances + self._num_bodies = 1 + self._device = device + self._body_names = body_names or ["body"] + + self._data = MockRigidObjectData( + num_instances=num_instances, + body_names=self._body_names, + device=device, + ) + + # -- Properties -- + + @property + def data(self) -> MockRigidObjectData: + """Data container for the rigid object.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of rigid object instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of bodies (always 1 for rigid object).""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Body names.""" + return self._body_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def root_view(self) -> None: + """Returns None (no physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + # -- Core methods -- + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset rigid object state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update rigid object data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns.""" + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + # -- State writer methods (no-op for mock) -- + + def write_root_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root state to simulation.""" + pass + + def write_root_com_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM state to simulation.""" + pass + + def write_root_link_state_to_sim( + self, + root_state: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link state to simulation.""" + pass + + def write_root_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root pose to simulation.""" + pass + + def write_root_link_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link pose to simulation.""" + pass + + def write_root_com_pose_to_sim( + self, + root_pose: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM pose to simulation.""" + pass + + def write_root_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) 
-> None: + """Write root velocity to simulation.""" + pass + + def write_root_com_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root CoM velocity to simulation.""" + pass + + def write_root_link_velocity_to_sim( + self, + root_velocity: torch.Tensor, + env_ids: Sequence[int] | None = None, + ) -> None: + """Write root link velocity to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = True, + ) -> None: + """Set external forces and torques.""" + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py new file mode 100644 index 00000000000..eaf636b50cb --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py @@ -0,0 +1,578 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock rigid object collection asset for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockRigidObjectCollectionData: + """Mock data container for rigid object collection asset. + + This class mimics the interface of BaseRigidObjectCollectionData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object collection data. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. 
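+
+        Example:
+            A minimal usage sketch (shapes follow the property defaults):
+
+            >>> import torch
+            >>> data = MockRigidObjectCollectionData(num_instances=2, num_bodies=3)
+            >>> data.default_body_state.shape
+            torch.Size([2, 3, 13])
+            >>> data.set_body_link_vel_w(torch.ones(2, 3, 6))
+            >>> data.body_link_lin_vel_w[0, 0]
+            tensor([1., 1., 1.])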
+ """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._device = device + + self.body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + + # Default states + self._default_body_pose: torch.Tensor | None = None + self._default_body_vel: torch.Tensor | None = None + + # Body state (link frame) + self._body_link_pose_w: torch.Tensor | None = None + self._body_link_vel_w: torch.Tensor | None = None + + # Body state (CoM frame) + self._body_com_pose_w: torch.Tensor | None = None + self._body_com_vel_w: torch.Tensor | None = None + self._body_com_acc_w: torch.Tensor | None = None + self._body_com_pose_b: torch.Tensor | None = None + + # Body properties + self._body_mass: torch.Tensor | None = None + self._body_inertia: torch.Tensor | None = None + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + def _identity_quat(self, *shape: int) -> torch.Tensor: + """Create identity quaternion tensor (w, x, y, z) = (1, 0, 0, 0).""" + quat = torch.zeros(*shape, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + + # -- Default state properties -- + + @property + def default_body_pose(self) -> torch.Tensor: + """Default body poses. Shape: (N, num_bodies, 7).""" + if self._default_body_pose is None: + pose = torch.zeros(self._num_instances, self._num_bodies, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, self._num_bodies) + return pose + return self._default_body_pose + + @property + def default_body_vel(self) -> torch.Tensor: + """Default body velocities. Shape: (N, num_bodies, 6).""" + if self._default_body_vel is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._default_body_vel + + @property + def default_body_state(self) -> torch.Tensor: + """Default body states. Shape: (N, num_bodies, 13).""" + return torch.cat([self.default_body_pose, self.default_body_vel], dim=-1) + + # -- Body state properties (link frame) -- + + @property + def body_link_pose_w(self) -> torch.Tensor: + """Body link poses in world frame. Shape: (N, num_bodies, 7).""" + if self._body_link_pose_w is None: + pose = torch.zeros(self._num_instances, self._num_bodies, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, self._num_bodies) + return pose + return self._body_link_pose_w + + @property + def body_link_vel_w(self) -> torch.Tensor: + """Body link velocities in world frame. Shape: (N, num_bodies, 6).""" + if self._body_link_vel_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._body_link_vel_w + + @property + def body_link_state_w(self) -> torch.Tensor: + """Body link states in world frame. Shape: (N, num_bodies, 13).""" + return torch.cat([self.body_link_pose_w, self.body_link_vel_w], dim=-1) + + @property + def body_link_pos_w(self) -> torch.Tensor: + """Body link positions. Shape: (N, num_bodies, 3).""" + return self.body_link_pose_w[..., :3] + + @property + def body_link_quat_w(self) -> torch.Tensor: + """Body link orientations. Shape: (N, num_bodies, 4).""" + return self.body_link_pose_w[..., 3:7] + + @property + def body_link_lin_vel_w(self) -> torch.Tensor: + """Body link linear velocities. Shape: (N, num_bodies, 3).""" + return self.body_link_vel_w[..., :3] + + @property + def body_link_ang_vel_w(self) -> torch.Tensor: + """Body link angular velocities. 
Shape: (N, num_bodies, 3).""" + return self.body_link_vel_w[..., 3:6] + + # -- Body state properties (CoM frame) -- + + @property + def body_com_pose_w(self) -> torch.Tensor: + """Body CoM poses in world frame. Shape: (N, num_bodies, 7).""" + if self._body_com_pose_w is None: + return self.body_link_pose_w.clone() + return self._body_com_pose_w + + @property + def body_com_vel_w(self) -> torch.Tensor: + """Body CoM velocities in world frame. Shape: (N, num_bodies, 6).""" + if self._body_com_vel_w is None: + return self.body_link_vel_w.clone() + return self._body_com_vel_w + + @property + def body_com_state_w(self) -> torch.Tensor: + """Body CoM states in world frame. Shape: (N, num_bodies, 13).""" + return torch.cat([self.body_com_pose_w, self.body_com_vel_w], dim=-1) + + @property + def body_com_acc_w(self) -> torch.Tensor: + """Body CoM accelerations in world frame. Shape: (N, num_bodies, 6).""" + if self._body_com_acc_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 6, device=self._device) + return self._body_com_acc_w + + @property + def body_com_pose_b(self) -> torch.Tensor: + """Body CoM poses in body frame. Shape: (N, num_bodies, 7).""" + if self._body_com_pose_b is None: + pose = torch.zeros(self._num_instances, self._num_bodies, 7, device=self._device) + pose[..., 3:7] = self._identity_quat(self._num_instances, self._num_bodies) + return pose + return self._body_com_pose_b + + @property + def body_com_pos_w(self) -> torch.Tensor: + """Body CoM positions. Shape: (N, num_bodies, 3).""" + return self.body_com_pose_w[..., :3] + + @property + def body_com_quat_w(self) -> torch.Tensor: + """Body CoM orientations. Shape: (N, num_bodies, 4).""" + return self.body_com_pose_w[..., 3:7] + + @property + def body_com_lin_vel_w(self) -> torch.Tensor: + """Body CoM linear velocities. Shape: (N, num_bodies, 3).""" + return self.body_com_vel_w[..., :3] + + @property + def body_com_ang_vel_w(self) -> torch.Tensor: + """Body CoM angular velocities. Shape: (N, num_bodies, 3).""" + return self.body_com_vel_w[..., 3:6] + + @property + def body_com_lin_acc_w(self) -> torch.Tensor: + """Body CoM linear accelerations. Shape: (N, num_bodies, 3).""" + return self.body_com_acc_w[..., :3] + + @property + def body_com_ang_acc_w(self) -> torch.Tensor: + """Body CoM angular accelerations. Shape: (N, num_bodies, 3).""" + return self.body_com_acc_w[..., 3:6] + + @property + def body_com_pos_b(self) -> torch.Tensor: + """Body CoM positions in body frame. Shape: (N, num_bodies, 3).""" + return self.body_com_pose_b[..., :3] + + @property + def body_com_quat_b(self) -> torch.Tensor: + """Body CoM orientations in body frame. Shape: (N, num_bodies, 4).""" + return self.body_com_pose_b[..., 3:7] + + # -- Body properties -- + + @property + def body_mass(self) -> torch.Tensor: + """Body masses. Shape: (N, num_bodies).""" + if self._body_mass is None: + return torch.ones(self._num_instances, self._num_bodies, device=self._device) + return self._body_mass + + @property + def body_inertia(self) -> torch.Tensor: + """Body inertias (flattened 3x3). Shape: (N, num_bodies, 9).""" + if self._body_inertia is None: + inertia = torch.zeros(self._num_instances, self._num_bodies, 9, device=self._device) + inertia[..., 0] = 1.0 # Ixx + inertia[..., 4] = 1.0 # Iyy + inertia[..., 8] = 1.0 # Izz + return inertia + return self._body_inertia + + # -- Derived properties -- + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity projection on bodies. 
Shape: (N, num_bodies, 3).""" + gravity = torch.zeros(self._num_instances, self._num_bodies, 3, device=self._device) + gravity[..., 2] = -1.0 + return gravity + + @property + def heading_w(self) -> torch.Tensor: + """Yaw heading per body. Shape: (N, num_bodies).""" + return torch.zeros(self._num_instances, self._num_bodies, device=self._device) + + @property + def body_link_lin_vel_b(self) -> torch.Tensor: + """Body link linear velocities in body frame. Shape: (N, num_bodies, 3).""" + return self.body_link_lin_vel_w.clone() + + @property + def body_link_ang_vel_b(self) -> torch.Tensor: + """Body link angular velocities in body frame. Shape: (N, num_bodies, 3).""" + return self.body_link_ang_vel_w.clone() + + @property + def body_com_lin_vel_b(self) -> torch.Tensor: + """Body CoM linear velocities in body frame. Shape: (N, num_bodies, 3).""" + return self.body_com_lin_vel_w.clone() + + @property + def body_com_ang_vel_b(self) -> torch.Tensor: + """Body CoM angular velocities in body frame. Shape: (N, num_bodies, 3).""" + return self.body_com_ang_vel_w.clone() + + # -- Convenience alias -- + + @property + def body_state_w(self) -> torch.Tensor: + """Body states (alias for body_com_state_w). Shape: (N, num_bodies, 13).""" + return self.body_com_state_w + + # -- Update method -- + + def update(self, dt: float) -> None: + """Update data (no-op for mock).""" + pass + + # -- Setters -- + + def set_default_body_pose(self, value: torch.Tensor) -> None: + self._default_body_pose = value.to(self._device) + + def set_default_body_vel(self, value: torch.Tensor) -> None: + self._default_body_vel = value.to(self._device) + + def set_body_link_pose_w(self, value: torch.Tensor) -> None: + self._body_link_pose_w = value.to(self._device) + + def set_body_link_vel_w(self, value: torch.Tensor) -> None: + self._body_link_vel_w = value.to(self._device) + + def set_body_com_pose_w(self, value: torch.Tensor) -> None: + self._body_com_pose_w = value.to(self._device) + + def set_body_com_vel_w(self, value: torch.Tensor) -> None: + self._body_com_vel_w = value.to(self._device) + + def set_body_com_acc_w(self, value: torch.Tensor) -> None: + self._body_com_acc_w = value.to(self._device) + + def set_body_com_pose_b(self, value: torch.Tensor) -> None: + self._body_com_pose_b = value.to(self._device) + + def set_body_mass(self, value: torch.Tensor) -> None: + self._body_mass = value.to(self._device) + + def set_body_inertia(self, value: torch.Tensor) -> None: + self._body_inertia = value.to(self._device) + + def set_mock_data(self, **kwargs) -> None: + """Bulk setter for mock data.""" + for key, value in kwargs.items(): + setter = getattr(self, f"set_{key}", None) + if setter is not None: + setter(value) + else: + raise ValueError(f"Unknown property: {key}") + + +class MockRigidObjectCollection: + """Mock rigid object collection asset for testing without Isaac Sim. + + This class mimics the interface of BaseRigidObjectCollection for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock rigid object collection. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies in the collection. + body_names: Names of bodies. + device: Device for tensor allocation. 
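+
+        Example:
+            A minimal usage sketch (the body names are arbitrary):
+
+            >>> collection = MockRigidObjectCollection(
+            ...     num_instances=2, num_bodies=2, body_names=["crate", "ball"]
+            ... )
+            >>> collection.data.body_link_pos_w.shape
+            torch.Size([2, 2, 3])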
+ """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._device = device + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + + self._data = MockRigidObjectCollectionData( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=self._body_names, + device=device, + ) + + # -- Properties -- + + @property + def data(self) -> MockRigidObjectCollectionData: + """Data container for the rigid object collection.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of environment instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of bodies in the collection.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Body names.""" + return self._body_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + @property + def root_view(self) -> None: + """Returns None (no physics view in mock).""" + return None + + @property + def instantaneous_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + @property + def permanent_wrench_composer(self) -> None: + """Returns None (no wrench composer in mock).""" + return None + + # -- Core methods -- + + def reset( + self, + env_ids: Sequence[int] | None = None, + object_ids: slice | torch.Tensor | None = None, + ) -> None: + """Reset rigid object collection state for specified environments.""" + pass + + def write_data_to_sim(self) -> None: + """Write data to simulation (no-op for mock).""" + pass + + def update(self, dt: float) -> None: + """Update rigid object collection data.""" + self._data.update(dt) + + # -- Finder methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[torch.Tensor, list[str], list[int]]: + """Find bodies by name regex patterns. + + Returns: + Tuple of (body_mask, body_names, body_indices). 
+ """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + # Create body mask + body_mask = torch.zeros(self._num_bodies, dtype=torch.bool, device=self._device) + body_mask[matched_indices] = True + + return body_mask, matched_names, matched_indices + + # -- State writer methods (no-op for mock) -- + + def write_body_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body states to simulation.""" + pass + + def write_body_com_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM states to simulation.""" + pass + + def write_body_link_state_to_sim( + self, + body_states: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link states to simulation.""" + pass + + def write_body_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body poses to simulation.""" + pass + + def write_body_link_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link poses to simulation.""" + pass + + def write_body_com_pose_to_sim( + self, + body_poses: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM poses to simulation.""" + pass + + def write_body_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body velocities to simulation.""" + pass + + def write_body_com_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body CoM velocities to simulation.""" + pass + + def write_body_link_velocity_to_sim( + self, + body_velocities: torch.Tensor, + env_ids: Sequence[int] | None = None, + body_ids: Sequence[int] | slice | None = None, + ) -> None: + """Write body link velocities to simulation.""" + pass + + # -- Setter methods -- + + def set_masses( + self, + masses: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body masses.""" + pass + + def set_coms( + self, + coms: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body centers of mass.""" + pass + + def set_inertias( + self, + inertias: torch.Tensor, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + ) -> None: + """Set body inertias.""" + pass + + def set_external_force_and_torque( + self, + forces: torch.Tensor, + torques: torch.Tensor, + 
positions: torch.Tensor | None = None, + body_ids: Sequence[int] | slice | None = None, + env_ids: Sequence[int] | None = None, + is_global: bool = True, + ) -> None: + """Set external forces and torques.""" + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py new file mode 100644 index 00000000000..dadef613a91 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock sensor interfaces for testing without Isaac Sim.""" + +from .mock_contact_sensor import MockContactSensor, MockContactSensorData +from .mock_frame_transformer import MockFrameTransformer, MockFrameTransformerData +from .mock_imu import MockImu, MockImuData +from .factories import ( + create_mock_contact_sensor, + create_mock_foot_contact_sensor, + create_mock_frame_transformer, + create_mock_imu, +) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py new file mode 100644 index 00000000000..1a9419ab4ce --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py @@ -0,0 +1,123 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating pre-configured mock sensors.""" + +from __future__ import annotations + +import torch + +from .mock_contact_sensor import MockContactSensor +from .mock_frame_transformer import MockFrameTransformer +from .mock_imu import MockImu + + +def create_mock_imu( + num_instances: int = 1, + device: str = "cpu", + gravity: tuple[float, float, float] = (0.0, 0.0, -1.0), +) -> MockImu: + """Create a mock IMU sensor with default configuration. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + gravity: Default gravity direction in body frame. + + Returns: + Configured MockImu instance. + """ + imu = MockImu(num_instances=num_instances, device=device) + imu.data.set_projected_gravity_b( + torch.tensor([gravity], device=device).expand(num_instances, -1).clone() + ) + return imu + + +def create_mock_contact_sensor( + num_instances: int = 1, + num_bodies: int = 1, + body_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, +) -> MockContactSensor: + """Create a mock contact sensor with default configuration. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + body_names: Names of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + + Returns: + Configured MockContactSensor instance. + """ + return MockContactSensor( + num_instances=num_instances, + num_bodies=num_bodies, + body_names=body_names, + device=device, + history_length=history_length, + num_filter_bodies=num_filter_bodies, + ) + + +def create_mock_foot_contact_sensor( + num_instances: int = 1, + num_feet: int = 4, + foot_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, +) -> MockContactSensor: + """Create a mock foot contact sensor for quadruped robots. + + Args: + num_instances: Number of environment instances. 
+ num_feet: Number of feet (default 4 for quadruped). + foot_names: Names of feet. Defaults to FL, FR, RL, RR. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + + Returns: + Configured MockContactSensor instance for foot contacts. + """ + if foot_names is None: + foot_names = ["FL_foot", "FR_foot", "RL_foot", "RR_foot"][:num_feet] + + return MockContactSensor( + num_instances=num_instances, + num_bodies=num_feet, + body_names=foot_names, + device=device, + history_length=history_length, + ) + + +def create_mock_frame_transformer( + num_instances: int = 1, + num_target_frames: int = 1, + target_frame_names: list[str] | None = None, + device: str = "cpu", +) -> MockFrameTransformer: + """Create a mock frame transformer sensor with default configuration. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. + + Returns: + Configured MockFrameTransformer instance. + """ + return MockFrameTransformer( + num_instances=num_instances, + num_target_frames=num_target_frames, + target_frame_names=target_frame_names, + device=device, + ) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py new file mode 100644 index 00000000000..af7ca0fbd7d --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py @@ -0,0 +1,430 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock contact sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockContactSensorData: + """Mock data container for contact sensor. + + This class mimics the interface of BaseContactSensorData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, + ): + """Initialize mock contact sensor data. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._device = device + self._history_length = history_length + self._num_filter_bodies = num_filter_bodies + + # Internal storage for mock data + self._pos_w: torch.Tensor | None = None + self._quat_w: torch.Tensor | None = None + self._net_forces_w: torch.Tensor | None = None + self._net_forces_w_history: torch.Tensor | None = None + self._force_matrix_w: torch.Tensor | None = None + self._force_matrix_w_history: torch.Tensor | None = None + self._contact_pos_w: torch.Tensor | None = None + self._friction_forces_w: torch.Tensor | None = None + self._last_air_time: torch.Tensor | None = None + self._current_air_time: torch.Tensor | None = None + self._last_contact_time: torch.Tensor | None = None + self._current_contact_time: torch.Tensor | None = None + + # -- Properties -- + + @property + def pos_w(self) -> torch.Tensor | None: + """Position of sensor origins in world frame. 
Shape: (N, B, 3).""" + if self._pos_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 3, device=self._device) + return self._pos_w + + @property + def quat_w(self) -> torch.Tensor | None: + """Orientation (w, x, y, z) in world frame. Shape: (N, B, 4).""" + if self._quat_w is None: + # Default to identity quaternion + quat = torch.zeros(self._num_instances, self._num_bodies, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + return self._quat_w + + @property + def pose_w(self) -> torch.Tensor | None: + """Pose in world frame (pos + quat). Shape: (N, B, 7).""" + return torch.cat([self.pos_w, self.quat_w], dim=-1) + + @property + def net_forces_w(self) -> torch.Tensor: + """Net normal contact forces in world frame. Shape: (N, B, 3).""" + if self._net_forces_w is None: + return torch.zeros(self._num_instances, self._num_bodies, 3, device=self._device) + return self._net_forces_w + + @property + def net_forces_w_history(self) -> torch.Tensor | None: + """History of net forces. Shape: (N, T, B, 3).""" + if self._history_length == 0: + return None + if self._net_forces_w_history is None: + return torch.zeros( + self._num_instances, self._history_length, self._num_bodies, 3, device=self._device + ) + return self._net_forces_w_history + + @property + def force_matrix_w(self) -> torch.Tensor | None: + """Filtered contact forces. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._force_matrix_w is None: + return torch.zeros( + self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device + ) + return self._force_matrix_w + + @property + def force_matrix_w_history(self) -> torch.Tensor | None: + """History of filtered forces. Shape: (N, T, B, M, 3).""" + if self._history_length == 0 or self._num_filter_bodies == 0: + return None + if self._force_matrix_w_history is None: + return torch.zeros( + self._num_instances, + self._history_length, + self._num_bodies, + self._num_filter_bodies, + 3, + device=self._device, + ) + return self._force_matrix_w_history + + @property + def contact_pos_w(self) -> torch.Tensor | None: + """Contact point positions in world frame. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._contact_pos_w is None: + return torch.zeros( + self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device + ) + return self._contact_pos_w + + @property + def friction_forces_w(self) -> torch.Tensor | None: + """Friction forces in world frame. Shape: (N, B, M, 3).""" + if self._num_filter_bodies == 0: + return None + if self._friction_forces_w is None: + return torch.zeros( + self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device + ) + return self._friction_forces_w + + @property + def last_air_time(self) -> torch.Tensor: + """Time in air before last contact. Shape: (N, B).""" + if self._last_air_time is None: + return torch.zeros(self._num_instances, self._num_bodies, device=self._device) + return self._last_air_time + + @property + def current_air_time(self) -> torch.Tensor: + """Current time in air. Shape: (N, B).""" + if self._current_air_time is None: + return torch.zeros(self._num_instances, self._num_bodies, device=self._device) + return self._current_air_time + + @property + def last_contact_time(self) -> torch.Tensor: + """Time in contact before last detach. 
Shape: (N, B).""" + if self._last_contact_time is None: + return torch.zeros(self._num_instances, self._num_bodies, device=self._device) + return self._last_contact_time + + @property + def current_contact_time(self) -> torch.Tensor: + """Current time in contact. Shape: (N, B).""" + if self._current_contact_time is None: + return torch.zeros(self._num_instances, self._num_bodies, device=self._device) + return self._current_contact_time + + # -- Setters -- + + def set_pos_w(self, value: torch.Tensor) -> None: + """Set position in world frame.""" + self._pos_w = value.to(self._device) + + def set_quat_w(self, value: torch.Tensor) -> None: + """Set orientation in world frame.""" + self._quat_w = value.to(self._device) + + def set_net_forces_w(self, value: torch.Tensor) -> None: + """Set net contact forces.""" + self._net_forces_w = value.to(self._device) + + def set_net_forces_w_history(self, value: torch.Tensor) -> None: + """Set net forces history.""" + self._net_forces_w_history = value.to(self._device) + + def set_force_matrix_w(self, value: torch.Tensor) -> None: + """Set filtered contact forces.""" + self._force_matrix_w = value.to(self._device) + + def set_force_matrix_w_history(self, value: torch.Tensor) -> None: + """Set filtered forces history.""" + self._force_matrix_w_history = value.to(self._device) + + def set_contact_pos_w(self, value: torch.Tensor) -> None: + """Set contact point positions.""" + self._contact_pos_w = value.to(self._device) + + def set_friction_forces_w(self, value: torch.Tensor) -> None: + """Set friction forces.""" + self._friction_forces_w = value.to(self._device) + + def set_last_air_time(self, value: torch.Tensor) -> None: + """Set last air time.""" + self._last_air_time = value.to(self._device) + + def set_current_air_time(self, value: torch.Tensor) -> None: + """Set current air time.""" + self._current_air_time = value.to(self._device) + + def set_last_contact_time(self, value: torch.Tensor) -> None: + """Set last contact time.""" + self._last_contact_time = value.to(self._device) + + def set_current_contact_time(self, value: torch.Tensor) -> None: + """Set current contact time.""" + self._current_contact_time = value.to(self._device) + + def set_mock_data( + self, + pos_w: torch.Tensor | None = None, + quat_w: torch.Tensor | None = None, + net_forces_w: torch.Tensor | None = None, + net_forces_w_history: torch.Tensor | None = None, + force_matrix_w: torch.Tensor | None = None, + force_matrix_w_history: torch.Tensor | None = None, + contact_pos_w: torch.Tensor | None = None, + friction_forces_w: torch.Tensor | None = None, + last_air_time: torch.Tensor | None = None, + current_air_time: torch.Tensor | None = None, + last_contact_time: torch.Tensor | None = None, + current_contact_time: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + pos_w: Position in world frame. Shape: (N, B, 3). + quat_w: Orientation in world frame. Shape: (N, B, 4). + net_forces_w: Net contact forces. Shape: (N, B, 3). + net_forces_w_history: History of net forces. Shape: (N, T, B, 3). + force_matrix_w: Filtered contact forces. Shape: (N, B, M, 3). + force_matrix_w_history: History of filtered forces. Shape: (N, T, B, M, 3). + contact_pos_w: Contact point positions. Shape: (N, B, M, 3). + friction_forces_w: Friction forces. Shape: (N, B, M, 3). + last_air_time: Time in air before last contact. Shape: (N, B). + current_air_time: Current time in air. Shape: (N, B). + last_contact_time: Time in contact before last detach. Shape: (N, B). 
+ current_contact_time: Current time in contact. Shape: (N, B). + """ + if pos_w is not None: + self.set_pos_w(pos_w) + if quat_w is not None: + self.set_quat_w(quat_w) + if net_forces_w is not None: + self.set_net_forces_w(net_forces_w) + if net_forces_w_history is not None: + self.set_net_forces_w_history(net_forces_w_history) + if force_matrix_w is not None: + self.set_force_matrix_w(force_matrix_w) + if force_matrix_w_history is not None: + self.set_force_matrix_w_history(force_matrix_w_history) + if contact_pos_w is not None: + self.set_contact_pos_w(contact_pos_w) + if friction_forces_w is not None: + self.set_friction_forces_w(friction_forces_w) + if last_air_time is not None: + self.set_last_air_time(last_air_time) + if current_air_time is not None: + self.set_current_air_time(current_air_time) + if last_contact_time is not None: + self.set_last_contact_time(last_contact_time) + if current_contact_time is not None: + self.set_current_contact_time(current_contact_time) + + +class MockContactSensor: + """Mock contact sensor for testing without Isaac Sim. + + This class mimics the interface of BaseContactSensor for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_bodies: int, + body_names: list[str] | None = None, + device: str = "cpu", + history_length: int = 0, + num_filter_bodies: int = 0, + ): + """Initialize mock contact sensor. + + Args: + num_instances: Number of environment instances. + num_bodies: Number of bodies with contact sensors. + body_names: Names of bodies with contact sensors. + device: Device for tensor allocation. + history_length: Length of history buffer for forces. + num_filter_bodies: Number of filter bodies for force matrix. + """ + self._num_instances = num_instances + self._num_bodies = num_bodies + self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] + self._device = device + self._data = MockContactSensorData( + num_instances, num_bodies, device, history_length, num_filter_bodies + ) + + # -- Properties -- + + @property + def data(self) -> MockContactSensorData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of bodies with contact sensors.""" + return self._num_bodies + + @property + def body_names(self) -> list[str]: + """Ordered names of bodies with contact sensors.""" + return self._body_names + + @property + def contact_view(self) -> None: + """Returns None (no PhysX view in mock).""" + return None + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find bodies by name regex patterns. + + Args: + name_keys: Regex pattern(s) to match body names. + preserve_order: If True, preserve order of name_keys in output. + + Returns: + Tuple of (body_indices, body_names) matching the patterns. 
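+
+        Example:
+            An illustrative call (the foot names are arbitrary):
+
+            >>> sensor = MockContactSensor(
+            ...     num_instances=2, num_bodies=4,
+            ...     body_names=["FL_foot", "FR_foot", "RL_foot", "RR_foot"],
+            ... )
+            >>> ids, names = sensor.find_bodies(".*_foot")
+            >>> sensor.data.net_forces_w[:, ids].shape
+            torch.Size([2, 4, 3])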
+ """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._body_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._body_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + def compute_first_contact(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: + """Check which bodies established contact within dt seconds. + + Args: + dt: Time window to check for first contact. + abs_tol: Absolute tolerance for contact time comparison. + + Returns: + Boolean tensor of shape (N, B) indicating first contact. + """ + return self._data.current_contact_time < (dt + abs_tol) + + def compute_first_air(self, dt: float, abs_tol: float = 1.0e-8) -> torch.Tensor: + """Check which bodies broke contact within dt seconds. + + Args: + dt: Time window to check for first air. + abs_tol: Absolute tolerance for air time comparison. + + Returns: + Boolean tensor of shape (N, B) indicating first air. + """ + return self._data.current_air_time < (dt + abs_tol) + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + # No-op for mock - data persists until explicitly changed + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + # No-op for mock - data is set explicitly + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py new file mode 100644 index 00000000000..a7661d37c74 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py @@ -0,0 +1,291 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock frame transformer sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockFrameTransformerData: + """Mock data container for frame transformer sensor. + + This class mimics the interface of BaseFrameTransformerData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__( + self, + num_instances: int, + num_target_frames: int, + target_frame_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock frame transformer data. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. 
+ """ + self._num_instances = num_instances + self._num_target_frames = num_target_frames + self._target_frame_names = target_frame_names or [f"frame_{i}" for i in range(num_target_frames)] + self._device = device + + # Internal storage for mock data + self._source_pos_w: torch.Tensor | None = None + self._source_quat_w: torch.Tensor | None = None + self._target_pos_w: torch.Tensor | None = None + self._target_quat_w: torch.Tensor | None = None + self._target_pos_source: torch.Tensor | None = None + self._target_quat_source: torch.Tensor | None = None + + # -- Properties -- + + @property + def target_frame_names(self) -> list[str]: + """Names of target frames.""" + return self._target_frame_names + + @property + def source_pos_w(self) -> torch.Tensor: + """Position of source frame in world frame. Shape: (N, 3).""" + if self._source_pos_w is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._source_pos_w + + @property + def source_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of source frame in world frame. Shape: (N, 4).""" + if self._source_quat_w is None: + quat = torch.zeros(self._num_instances, 4, device=self._device) + quat[:, 0] = 1.0 + return quat + return self._source_quat_w + + @property + def source_pose_w(self) -> torch.Tensor: + """Pose of source frame in world frame. Shape: (N, 7).""" + return torch.cat([self.source_pos_w, self.source_quat_w], dim=-1) + + @property + def target_pos_w(self) -> torch.Tensor: + """Position of target frames in world frame. Shape: (N, M, 3).""" + if self._target_pos_w is None: + return torch.zeros(self._num_instances, self._num_target_frames, 3, device=self._device) + return self._target_pos_w + + @property + def target_quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) of target frames in world frame. Shape: (N, M, 4).""" + if self._target_quat_w is None: + quat = torch.zeros(self._num_instances, self._num_target_frames, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + return self._target_quat_w + + @property + def target_pose_w(self) -> torch.Tensor: + """Pose of target frames in world frame. Shape: (N, M, 7).""" + return torch.cat([self.target_pos_w, self.target_quat_w], dim=-1) + + @property + def target_pos_source(self) -> torch.Tensor: + """Position of target frames relative to source frame. Shape: (N, M, 3).""" + if self._target_pos_source is None: + return torch.zeros(self._num_instances, self._num_target_frames, 3, device=self._device) + return self._target_pos_source + + @property + def target_quat_source(self) -> torch.Tensor: + """Orientation (w, x, y, z) of target frames relative to source frame. Shape: (N, M, 4).""" + if self._target_quat_source is None: + quat = torch.zeros(self._num_instances, self._num_target_frames, 4, device=self._device) + quat[..., 0] = 1.0 + return quat + return self._target_quat_source + + @property + def target_pose_source(self) -> torch.Tensor: + """Pose of target frames relative to source frame. 
Shape: (N, M, 7).""" + return torch.cat([self.target_pos_source, self.target_quat_source], dim=-1) + + # -- Setters -- + + def set_source_pos_w(self, value: torch.Tensor) -> None: + """Set source position in world frame.""" + self._source_pos_w = value.to(self._device) + + def set_source_quat_w(self, value: torch.Tensor) -> None: + """Set source orientation in world frame.""" + self._source_quat_w = value.to(self._device) + + def set_target_pos_w(self, value: torch.Tensor) -> None: + """Set target positions in world frame.""" + self._target_pos_w = value.to(self._device) + + def set_target_quat_w(self, value: torch.Tensor) -> None: + """Set target orientations in world frame.""" + self._target_quat_w = value.to(self._device) + + def set_target_pos_source(self, value: torch.Tensor) -> None: + """Set target positions relative to source.""" + self._target_pos_source = value.to(self._device) + + def set_target_quat_source(self, value: torch.Tensor) -> None: + """Set target orientations relative to source.""" + self._target_quat_source = value.to(self._device) + + def set_mock_data( + self, + source_pos_w: torch.Tensor | None = None, + source_quat_w: torch.Tensor | None = None, + target_pos_w: torch.Tensor | None = None, + target_quat_w: torch.Tensor | None = None, + target_pos_source: torch.Tensor | None = None, + target_quat_source: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + source_pos_w: Source position in world frame. Shape: (N, 3). + source_quat_w: Source orientation in world frame. Shape: (N, 4). + target_pos_w: Target positions in world frame. Shape: (N, M, 3). + target_quat_w: Target orientations in world frame. Shape: (N, M, 4). + target_pos_source: Target positions relative to source. Shape: (N, M, 3). + target_quat_source: Target orientations relative to source. Shape: (N, M, 4). + """ + if source_pos_w is not None: + self.set_source_pos_w(source_pos_w) + if source_quat_w is not None: + self.set_source_quat_w(source_quat_w) + if target_pos_w is not None: + self.set_target_pos_w(target_pos_w) + if target_quat_w is not None: + self.set_target_quat_w(target_quat_w) + if target_pos_source is not None: + self.set_target_pos_source(target_pos_source) + if target_quat_source is not None: + self.set_target_quat_source(target_quat_source) + + +class MockFrameTransformer: + """Mock frame transformer sensor for testing without Isaac Sim. + + This class mimics the interface of BaseFrameTransformer for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + num_target_frames: int, + target_frame_names: list[str] | None = None, + device: str = "cpu", + ): + """Initialize mock frame transformer sensor. + + Args: + num_instances: Number of environment instances. + num_target_frames: Number of target frames to track. + target_frame_names: Names of target frames. + device: Device for tensor allocation. 
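+
+        Example:
+            A minimal usage sketch (the frame name and values are arbitrary):
+
+            >>> import torch
+            >>> ft = MockFrameTransformer(
+            ...     num_instances=2, num_target_frames=1, target_frame_names=["end_effector"]
+            ... )
+            >>> ft.data.set_target_pos_source(torch.full((2, 1, 3), 0.5))
+            >>> ft.data.target_pose_source.shape
+            torch.Size([2, 1, 7])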
+ """ + self._num_instances = num_instances + self._num_target_frames = num_target_frames + self._target_frame_names = target_frame_names or [f"frame_{i}" for i in range(num_target_frames)] + self._device = device + self._data = MockFrameTransformerData( + num_instances, num_target_frames, self._target_frame_names, device + ) + + # -- Properties -- + + @property + def data(self) -> MockFrameTransformerData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def num_bodies(self) -> int: + """Number of target bodies being tracked.""" + return self._num_target_frames + + @property + def body_names(self) -> list[str]: + """Names of target bodies being tracked.""" + return self._target_frame_names + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def find_bodies( + self, name_keys: str | Sequence[str], preserve_order: bool = False + ) -> tuple[list[int], list[str]]: + """Find target frames by name regex patterns. + + Args: + name_keys: Regex pattern(s) to match frame names. + preserve_order: If True, preserve order of name_keys in output. + + Returns: + Tuple of (frame_indices, frame_names) matching the patterns. + """ + if isinstance(name_keys, str): + name_keys = [name_keys] + + matched_indices = [] + matched_names = [] + + if preserve_order: + for key in name_keys: + pattern = re.compile(key) + for i, name in enumerate(self._target_frame_names): + if pattern.fullmatch(name) and i not in matched_indices: + matched_indices.append(i) + matched_names.append(name) + else: + for i, name in enumerate(self._target_frame_names): + for key in name_keys: + pattern = re.compile(key) + if pattern.fullmatch(name): + matched_indices.append(i) + matched_names.append(name) + break + + return matched_indices, matched_names + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. + """ + # No-op for mock - data persists until explicitly changed + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + # No-op for mock - data is set explicitly + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py new file mode 100644 index 00000000000..66e663a3c8d --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py @@ -0,0 +1,228 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock IMU sensor for testing without Isaac Sim.""" + +from __future__ import annotations + +import re +import torch +from typing import Sequence + + +class MockImuData: + """Mock data container for IMU sensor. + + This class mimics the interface of BaseImuData for testing purposes. + All tensor properties return zero tensors with correct shapes if not explicitly set. + """ + + def __init__(self, num_instances: int, device: str = "cpu"): + """Initialize mock IMU data. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. 
+ """ + self._num_instances = num_instances + self._device = device + + # Internal storage for mock data + self._pos_w: torch.Tensor | None = None + self._quat_w: torch.Tensor | None = None + self._projected_gravity_b: torch.Tensor | None = None + self._lin_vel_b: torch.Tensor | None = None + self._ang_vel_b: torch.Tensor | None = None + self._lin_acc_b: torch.Tensor | None = None + self._ang_acc_b: torch.Tensor | None = None + + # -- Properties -- + + @property + def pos_w(self) -> torch.Tensor: + """Position of sensor origin in world frame. Shape: (N, 3).""" + if self._pos_w is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._pos_w + + @property + def quat_w(self) -> torch.Tensor: + """Orientation (w, x, y, z) in world frame. Shape: (N, 4).""" + if self._quat_w is None: + # Default to identity quaternion (w, x, y, z) = (1, 0, 0, 0) + quat = torch.zeros(self._num_instances, 4, device=self._device) + quat[:, 0] = 1.0 + return quat + return self._quat_w + + @property + def pose_w(self) -> torch.Tensor: + """Pose in world frame (pos + quat). Shape: (N, 7).""" + return torch.cat([self.pos_w, self.quat_w], dim=-1) + + @property + def projected_gravity_b(self) -> torch.Tensor: + """Gravity direction in IMU body frame. Shape: (N, 3).""" + if self._projected_gravity_b is None: + # Default gravity pointing down in body frame + gravity = torch.zeros(self._num_instances, 3, device=self._device) + gravity[:, 2] = -1.0 + return gravity + return self._projected_gravity_b + + @property + def lin_vel_b(self) -> torch.Tensor: + """Linear velocity in IMU body frame. Shape: (N, 3).""" + if self._lin_vel_b is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._lin_vel_b + + @property + def ang_vel_b(self) -> torch.Tensor: + """Angular velocity in IMU body frame. Shape: (N, 3).""" + if self._ang_vel_b is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._ang_vel_b + + @property + def lin_acc_b(self) -> torch.Tensor: + """Linear acceleration in IMU body frame. Shape: (N, 3).""" + if self._lin_acc_b is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._lin_acc_b + + @property + def ang_acc_b(self) -> torch.Tensor: + """Angular acceleration in IMU body frame. 
Shape: (N, 3).""" + if self._ang_acc_b is None: + return torch.zeros(self._num_instances, 3, device=self._device) + return self._ang_acc_b + + # -- Setters -- + + def set_pos_w(self, value: torch.Tensor) -> None: + """Set position in world frame.""" + self._pos_w = value.to(self._device) + + def set_quat_w(self, value: torch.Tensor) -> None: + """Set orientation in world frame.""" + self._quat_w = value.to(self._device) + + def set_projected_gravity_b(self, value: torch.Tensor) -> None: + """Set projected gravity in body frame.""" + self._projected_gravity_b = value.to(self._device) + + def set_lin_vel_b(self, value: torch.Tensor) -> None: + """Set linear velocity in body frame.""" + self._lin_vel_b = value.to(self._device) + + def set_ang_vel_b(self, value: torch.Tensor) -> None: + """Set angular velocity in body frame.""" + self._ang_vel_b = value.to(self._device) + + def set_lin_acc_b(self, value: torch.Tensor) -> None: + """Set linear acceleration in body frame.""" + self._lin_acc_b = value.to(self._device) + + def set_ang_acc_b(self, value: torch.Tensor) -> None: + """Set angular acceleration in body frame.""" + self._ang_acc_b = value.to(self._device) + + def set_mock_data( + self, + pos_w: torch.Tensor | None = None, + quat_w: torch.Tensor | None = None, + projected_gravity_b: torch.Tensor | None = None, + lin_vel_b: torch.Tensor | None = None, + ang_vel_b: torch.Tensor | None = None, + lin_acc_b: torch.Tensor | None = None, + ang_acc_b: torch.Tensor | None = None, + ) -> None: + """Bulk setter for mock data. + + Args: + pos_w: Position in world frame. Shape: (N, 3). + quat_w: Orientation (w, x, y, z) in world frame. Shape: (N, 4). + projected_gravity_b: Gravity direction in body frame. Shape: (N, 3). + lin_vel_b: Linear velocity in body frame. Shape: (N, 3). + ang_vel_b: Angular velocity in body frame. Shape: (N, 3). + lin_acc_b: Linear acceleration in body frame. Shape: (N, 3). + ang_acc_b: Angular acceleration in body frame. Shape: (N, 3). + """ + if pos_w is not None: + self.set_pos_w(pos_w) + if quat_w is not None: + self.set_quat_w(quat_w) + if projected_gravity_b is not None: + self.set_projected_gravity_b(projected_gravity_b) + if lin_vel_b is not None: + self.set_lin_vel_b(lin_vel_b) + if ang_vel_b is not None: + self.set_ang_vel_b(ang_vel_b) + if lin_acc_b is not None: + self.set_lin_acc_b(lin_acc_b) + if ang_acc_b is not None: + self.set_ang_acc_b(ang_acc_b) + + +class MockImu: + """Mock IMU sensor for testing without Isaac Sim. + + This class mimics the interface of BaseImu for testing purposes. + It provides the same properties and methods but without simulation dependencies. + """ + + def __init__( + self, + num_instances: int, + device: str = "cpu", + ): + """Initialize mock IMU sensor. + + Args: + num_instances: Number of sensor instances. + device: Device for tensor allocation. + """ + self._num_instances = num_instances + self._device = device + self._data = MockImuData(num_instances, device) + + # -- Properties -- + + @property + def data(self) -> MockImuData: + """Data container for the sensor.""" + return self._data + + @property + def num_instances(self) -> int: + """Number of sensor instances.""" + return self._num_instances + + @property + def device(self) -> str: + """Device for tensor allocation.""" + return self._device + + # -- Methods -- + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset sensor state for specified environments. + + Args: + env_ids: Environment indices to reset. If None, resets all. 
+ """ + # No-op for mock - data persists until explicitly changed + pass + + def update(self, dt: float, force_recompute: bool = False) -> None: + """Update sensor. + + Args: + dt: Time step since last update. + force_recompute: Force recomputation of buffers. + """ + # No-op for mock - data is set explicitly + pass diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py new file mode 100644 index 00000000000..a8cfc933e97 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for creating and using mock interfaces.""" + +from .mock_generator import MockArticulationBuilder, MockSensorBuilder +from .patching import patch_articulation, patch_sensor diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py new file mode 100644 index 00000000000..d18a6b6283e --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py @@ -0,0 +1,351 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for generating custom mock objects.""" + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from ..assets import MockArticulation + from ..sensors import MockContactSensor, MockFrameTransformer, MockImu + + +class MockArticulationBuilder: + """Builder class for creating custom MockArticulation instances. + + Example: + >>> robot = (MockArticulationBuilder() + ... .with_joints(["hip", "knee", "ankle"], default_pos=[0.0, 0.5, -0.5]) + ... .with_bodies(["base", "thigh", "shin", "foot"]) + ... .with_fixed_base(True) + ... .with_num_instances(8) + ... .build()) + """ + + def __init__(self): + """Initialize the builder with default values.""" + self._num_instances = 1 + self._joint_names: list[str] = [] + self._body_names: list[str] = [] + self._is_fixed_base = False + self._device = "cpu" + self._default_joint_pos: list[float] | None = None + self._default_joint_vel: list[float] | None = None + self._joint_pos_limits: tuple[float, float] | None = None + self._num_fixed_tendons = 0 + self._num_spatial_tendons = 0 + self._fixed_tendon_names: list[str] | None = None + self._spatial_tendon_names: list[str] | None = None + + def with_num_instances(self, num_instances: int) -> "MockArticulationBuilder": + """Set the number of articulation instances. + + Args: + num_instances: Number of instances. + + Returns: + Self for method chaining. + """ + self._num_instances = num_instances + return self + + def with_joints( + self, + joint_names: list[str], + default_pos: list[float] | None = None, + default_vel: list[float] | None = None, + ) -> "MockArticulationBuilder": + """Set joint configuration. + + Args: + joint_names: Names of the joints. + default_pos: Default joint positions. + default_vel: Default joint velocities. + + Returns: + Self for method chaining. + """ + self._joint_names = joint_names + self._default_joint_pos = default_pos + self._default_joint_vel = default_vel + return self + + def with_bodies(self, body_names: list[str]) -> "MockArticulationBuilder": + """Set body configuration. + + Args: + body_names: Names of the bodies. 
+ + Returns: + Self for method chaining. + """ + self._body_names = body_names + return self + + def with_fixed_base(self, is_fixed: bool) -> "MockArticulationBuilder": + """Set whether the articulation has a fixed base. + + Args: + is_fixed: True for fixed base, False for floating base. + + Returns: + Self for method chaining. + """ + self._is_fixed_base = is_fixed + return self + + def with_device(self, device: str) -> "MockArticulationBuilder": + """Set the device for tensor allocation. + + Args: + device: Device string (e.g., "cpu", "cuda:0"). + + Returns: + Self for method chaining. + """ + self._device = device + return self + + def with_joint_limits( + self, lower: float, upper: float + ) -> "MockArticulationBuilder": + """Set uniform joint position limits for all joints. + + Args: + lower: Lower joint limit. + upper: Upper joint limit. + + Returns: + Self for method chaining. + """ + self._joint_pos_limits = (lower, upper) + return self + + def with_fixed_tendons( + self, tendon_names: list[str] + ) -> "MockArticulationBuilder": + """Set fixed tendon configuration. + + Args: + tendon_names: Names of fixed tendons. + + Returns: + Self for method chaining. + """ + self._fixed_tendon_names = tendon_names + self._num_fixed_tendons = len(tendon_names) + return self + + def with_spatial_tendons( + self, tendon_names: list[str] + ) -> "MockArticulationBuilder": + """Set spatial tendon configuration. + + Args: + tendon_names: Names of spatial tendons. + + Returns: + Self for method chaining. + """ + self._spatial_tendon_names = tendon_names + self._num_spatial_tendons = len(tendon_names) + return self + + def build(self) -> "MockArticulation": + """Build the MockArticulation instance. + + Returns: + Configured MockArticulation instance. + """ + from ..assets import MockArticulation + + num_joints = len(self._joint_names) if self._joint_names else 1 + num_bodies = len(self._body_names) if self._body_names else num_joints + 1 + + robot = MockArticulation( + num_instances=self._num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=self._joint_names or None, + body_names=self._body_names or None, + is_fixed_base=self._is_fixed_base, + num_fixed_tendons=self._num_fixed_tendons, + num_spatial_tendons=self._num_spatial_tendons, + fixed_tendon_names=self._fixed_tendon_names, + spatial_tendon_names=self._spatial_tendon_names, + device=self._device, + ) + + # Set default joint positions + if self._default_joint_pos is not None: + default_pos = torch.tensor( + [self._default_joint_pos] * self._num_instances, + device=self._device, + ) + robot.data.set_default_joint_pos(default_pos) + robot.data.set_joint_pos(default_pos) + + # Set default joint velocities + if self._default_joint_vel is not None: + default_vel = torch.tensor( + [self._default_joint_vel] * self._num_instances, + device=self._device, + ) + robot.data.set_default_joint_vel(default_vel) + robot.data.set_joint_vel(default_vel) + + # Set joint limits + if self._joint_pos_limits is not None: + limits = torch.zeros(self._num_instances, num_joints, 2, device=self._device) + limits[..., 0] = self._joint_pos_limits[0] + limits[..., 1] = self._joint_pos_limits[1] + robot.data.set_joint_pos_limits(limits) + + return robot + + +class MockSensorBuilder: + """Builder class for creating custom mock sensor instances. + + Example: + >>> sensor = (MockSensorBuilder("contact") + ... .with_num_instances(4) + ... .with_bodies(["FL_foot", "FR_foot", "RL_foot", "RR_foot"]) + ... .with_device("cuda") + ... 
.build()) + """ + + def __init__(self, sensor_type: str): + """Initialize the builder. + + Args: + sensor_type: Type of sensor ("contact", "imu", or "frame_transformer"). + """ + if sensor_type not in ("contact", "imu", "frame_transformer"): + raise ValueError(f"Unknown sensor type: {sensor_type}") + self._sensor_type = sensor_type + self._num_instances = 1 + self._device = "cpu" + + # Contact sensor specific + self._body_names: list[str] = [] + self._history_length = 0 + self._num_filter_bodies = 0 + + # Frame transformer specific + self._target_frame_names: list[str] = [] + + def with_num_instances(self, num_instances: int) -> "MockSensorBuilder": + """Set the number of sensor instances. + + Args: + num_instances: Number of instances. + + Returns: + Self for method chaining. + """ + self._num_instances = num_instances + return self + + def with_device(self, device: str) -> "MockSensorBuilder": + """Set the device for tensor allocation. + + Args: + device: Device string. + + Returns: + Self for method chaining. + """ + self._device = device + return self + + def with_bodies(self, body_names: list[str]) -> "MockSensorBuilder": + """Set body names (for contact sensor). + + Args: + body_names: Names of bodies with contact sensors. + + Returns: + Self for method chaining. + """ + self._body_names = body_names + return self + + def with_history_length(self, length: int) -> "MockSensorBuilder": + """Set history buffer length (for contact sensor). + + Args: + length: Length of history buffer. + + Returns: + Self for method chaining. + """ + self._history_length = length + return self + + def with_filter_bodies(self, num_filter_bodies: int) -> "MockSensorBuilder": + """Set number of filter bodies (for contact sensor). + + Args: + num_filter_bodies: Number of filter bodies for force matrix. + + Returns: + Self for method chaining. + """ + self._num_filter_bodies = num_filter_bodies + return self + + def with_target_frames(self, frame_names: list[str]) -> "MockSensorBuilder": + """Set target frame names (for frame transformer). + + Args: + frame_names: Names of target frames. + + Returns: + Self for method chaining. + """ + self._target_frame_names = frame_names + return self + + def build(self) -> "MockContactSensor | MockImu | MockFrameTransformer": + """Build the mock sensor instance. + + Returns: + Configured mock sensor instance. 
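+
+        Example:
+            A minimal sketch for the non-contact sensor types (the contact case is shown in the class docstring):
+
+            >>> imu = MockSensorBuilder("imu").with_num_instances(2).build()
+            >>> imu.num_instances
+            2
+            >>> ft = MockSensorBuilder("frame_transformer").with_target_frames(["ee", "object"]).build()
+            >>> ft.num_bodies
+            2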
+ """ + if self._sensor_type == "contact": + from ..sensors import MockContactSensor + + num_bodies = len(self._body_names) if self._body_names else 1 + return MockContactSensor( + num_instances=self._num_instances, + num_bodies=num_bodies, + body_names=self._body_names or None, + device=self._device, + history_length=self._history_length, + num_filter_bodies=self._num_filter_bodies, + ) + elif self._sensor_type == "imu": + from ..sensors import MockImu + + return MockImu( + num_instances=self._num_instances, + device=self._device, + ) + elif self._sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + num_frames = len(self._target_frame_names) if self._target_frame_names else 1 + return MockFrameTransformer( + num_instances=self._num_instances, + num_target_frames=num_frames, + target_frame_names=self._target_frame_names or None, + device=self._device, + ) + else: + raise ValueError(f"Unknown sensor type: {self._sensor_type}") diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py new file mode 100644 index 00000000000..cc44c316530 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py @@ -0,0 +1,263 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for patching real classes with mock implementations in tests.""" + +from __future__ import annotations + +import functools +from contextlib import contextmanager +from typing import Any, Callable, Generator, TypeVar +from unittest.mock import patch + +F = TypeVar("F", bound=Callable[..., Any]) + + +@contextmanager +def patch_articulation( + target: str, + num_instances: int = 1, + num_joints: int = 12, + num_bodies: int = 13, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", + **kwargs: Any, +) -> Generator[Any, None, None]: + """Context manager for patching an articulation class with MockArticulation. + + Args: + target: The target to patch (e.g., "my_module.Articulation"). + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for MockArticulation. + + Yields: + The mock articulation class. + + Example: + >>> with patch_articulation("my_module.robot", num_joints=12) as MockRobot: + ... robot = MockRobot() + ... robot.data.set_joint_pos(torch.zeros(1, 12)) + ... 
result = my_function_using_robot(robot) + """ + from ..assets import MockArticulation + + def create_mock(*args: Any, **create_kwargs: Any) -> MockArticulation: + # Merge configuration with any runtime kwargs + return MockArticulation( + num_instances=create_kwargs.get("num_instances", num_instances), + num_joints=create_kwargs.get("num_joints", num_joints), + num_bodies=create_kwargs.get("num_bodies", num_bodies), + joint_names=create_kwargs.get("joint_names", joint_names), + body_names=create_kwargs.get("body_names", body_names), + is_fixed_base=create_kwargs.get("is_fixed_base", is_fixed_base), + device=create_kwargs.get("device", device), + **{k: v for k, v in kwargs.items() if k not in create_kwargs}, + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_sensor( + target: str, + sensor_type: str, + num_instances: int = 1, + device: str = "cpu", + **kwargs: Any, +) -> Generator[Any, None, None]: + """Context manager for patching a sensor class with a mock sensor. + + Args: + target: The target to patch (e.g., "my_module.ContactSensor"). + sensor_type: Type of sensor ("contact", "imu", or "frame_transformer"). + num_instances: Number of sensor instances. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for the mock sensor. + + Yields: + The mock sensor class. + + Example: + >>> with patch_sensor("my_env.ContactSensor", "contact", num_bodies=4) as MockSensor: + ... sensor = MockSensor() + ... sensor.data.set_net_forces_w(torch.randn(1, 4, 3)) + ... result = my_function(sensor) + """ + if sensor_type == "contact": + from ..sensors import MockContactSensor + + def create_mock(*args: Any, **create_kwargs: Any) -> MockContactSensor: + return MockContactSensor( + num_instances=create_kwargs.get("num_instances", num_instances), + num_bodies=create_kwargs.get("num_bodies", kwargs.get("num_bodies", 1)), + body_names=create_kwargs.get("body_names", kwargs.get("body_names")), + device=create_kwargs.get("device", device), + history_length=create_kwargs.get("history_length", kwargs.get("history_length", 0)), + num_filter_bodies=create_kwargs.get( + "num_filter_bodies", kwargs.get("num_filter_bodies", 0) + ), + ) + + elif sensor_type == "imu": + from ..sensors import MockImu + + def create_mock(*args: Any, **create_kwargs: Any) -> MockImu: + return MockImu( + num_instances=create_kwargs.get("num_instances", num_instances), + device=create_kwargs.get("device", device), + ) + + elif sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + def create_mock(*args: Any, **create_kwargs: Any) -> MockFrameTransformer: + return MockFrameTransformer( + num_instances=create_kwargs.get("num_instances", num_instances), + num_target_frames=create_kwargs.get( + "num_target_frames", kwargs.get("num_target_frames", 1) + ), + target_frame_names=create_kwargs.get( + "target_frame_names", kwargs.get("target_frame_names") + ), + device=create_kwargs.get("device", device), + ) + + else: + raise ValueError(f"Unknown sensor type: {sensor_type}") + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +def mock_articulation( + num_instances: int = 1, + num_joints: int = 12, + num_bodies: int = 13, + joint_names: list[str] | None = None, + body_names: list[str] | None = None, + is_fixed_base: bool = False, + device: str = "cpu", + **kwargs: Any, +) -> Callable[[F], F]: + """Decorator for injecting a MockArticulation into a test function. 
+ + The mock articulation is passed as the first argument to the decorated function. + + Args: + num_instances: Number of articulation instances. + num_joints: Number of joints. + num_bodies: Number of bodies. + joint_names: Names of joints. + body_names: Names of bodies. + is_fixed_base: Whether the articulation has a fixed base. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for MockArticulation. + + Returns: + Decorator function. + + Example: + >>> @mock_articulation(num_joints=12, num_bodies=13) + ... def test_my_function(mock_robot): + ... mock_robot.data.set_joint_pos(torch.zeros(1, 12)) + ... result = compute_something(mock_robot) + ... assert result.shape == (1, 12) + """ + from ..assets import MockArticulation + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args: Any, **wrap_kwargs: Any) -> Any: + mock = MockArticulation( + num_instances=num_instances, + num_joints=num_joints, + num_bodies=num_bodies, + joint_names=joint_names, + body_names=body_names, + is_fixed_base=is_fixed_base, + device=device, + **kwargs, + ) + return func(mock, *args, **wrap_kwargs) + + return wrapper # type: ignore + + return decorator + + +def mock_sensor( + sensor_type: str, + num_instances: int = 1, + device: str = "cpu", + **kwargs: Any, +) -> Callable[[F], F]: + """Decorator for injecting a mock sensor into a test function. + + The mock sensor is passed as the first argument to the decorated function. + + Args: + sensor_type: Type of sensor ("contact", "imu", or "frame_transformer"). + num_instances: Number of sensor instances. + device: Device for tensor allocation. + **kwargs: Additional keyword arguments for the mock sensor. + + Returns: + Decorator function. + + Example: + >>> @mock_sensor("contact", num_instances=4, num_bodies=4) + ... def test_contact_reward(mock_contact): + ... mock_contact.data.set_net_forces_w(torch.randn(4, 4, 3)) + ... reward = compute_contact_reward(mock_contact) + ... assert reward.shape == (4,) + """ + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args: Any, **wrap_kwargs: Any) -> Any: + if sensor_type == "contact": + from ..sensors import MockContactSensor + + mock = MockContactSensor( + num_instances=num_instances, + num_bodies=kwargs.get("num_bodies", 1), + body_names=kwargs.get("body_names"), + device=device, + history_length=kwargs.get("history_length", 0), + num_filter_bodies=kwargs.get("num_filter_bodies", 0), + ) + elif sensor_type == "imu": + from ..sensors import MockImu + + mock = MockImu( + num_instances=num_instances, + device=device, + ) + elif sensor_type == "frame_transformer": + from ..sensors import MockFrameTransformer + + mock = MockFrameTransformer( + num_instances=num_instances, + num_target_frames=kwargs.get("num_target_frames", 1), + target_frame_names=kwargs.get("target_frame_names"), + device=device, + ) + else: + raise ValueError(f"Unknown sensor type: {sensor_type}") + + return func(mock, *args, **wrap_kwargs) + + return wrapper # type: ignore + + return decorator diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py new file mode 100644 index 00000000000..9718a15aaea --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py @@ -0,0 +1,336 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for mock asset interfaces.""" + +import pytest +import torch + +from isaaclab.test.mock_interfaces.assets import ( + MockArticulation, + MockRigidObject, + MockRigidObjectCollection, + create_mock_articulation, + create_mock_quadruped, + create_mock_humanoid, + create_mock_rigid_object, + create_mock_rigid_object_collection, +) +from isaaclab.test.mock_interfaces.utils import MockArticulationBuilder, MockSensorBuilder + + +# ============================================================================== +# MockArticulation Tests +# ============================================================================== + + +class TestMockArticulation: + """Tests for MockArticulation and MockArticulationData.""" + + @pytest.fixture + def robot(self): + """Create a mock articulation fixture.""" + return MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + device="cpu", + ) + + def test_initialization(self, robot): + """Test that MockArticulation initializes correctly.""" + assert robot.num_instances == 4 + assert robot.num_joints == 12 + assert robot.num_bodies == 13 + assert robot.device == "cpu" + assert robot.root_view is None + assert robot.data is not None + + def test_joint_state_shapes(self, robot): + """Test joint state tensor shapes.""" + assert robot.data.joint_pos.shape == (4, 12) + assert robot.data.joint_vel.shape == (4, 12) + assert robot.data.joint_acc.shape == (4, 12) + + def test_root_state_shapes(self, robot): + """Test root state tensor shapes.""" + # Link frame + assert robot.data.root_link_pose_w.shape == (4, 7) + assert robot.data.root_link_vel_w.shape == (4, 6) + assert robot.data.root_link_state_w.shape == (4, 13) + + # Sliced properties + assert robot.data.root_link_pos_w.shape == (4, 3) + assert robot.data.root_link_quat_w.shape == (4, 4) + assert robot.data.root_link_lin_vel_w.shape == (4, 3) + assert robot.data.root_link_ang_vel_w.shape == (4, 3) + + def test_body_state_shapes(self, robot): + """Test body state tensor shapes.""" + assert robot.data.body_link_pose_w.shape == (4, 13, 7) + assert robot.data.body_link_vel_w.shape == (4, 13, 6) + assert robot.data.body_link_state_w.shape == (4, 13, 13) + + def test_default_state_shapes(self, robot): + """Test default state tensor shapes.""" + assert robot.data.default_root_pose.shape == (4, 7) + assert robot.data.default_root_vel.shape == (4, 6) + assert robot.data.default_root_state.shape == (4, 13) + assert robot.data.default_joint_pos.shape == (4, 12) + assert robot.data.default_joint_vel.shape == (4, 12) + + def test_identity_quaternion_default(self, robot): + """Test that default quaternions are identity quaternions.""" + quat = robot.data.root_link_quat_w + # w=1, x=y=z=0 + assert torch.all(quat[:, 0] == 1) + assert torch.all(quat[:, 1:] == 0) + + def test_set_joint_pos(self, robot): + """Test setting joint positions.""" + joint_pos = torch.randn(4, 12) + robot.data.set_joint_pos(joint_pos) + assert torch.allclose(robot.data.joint_pos, joint_pos) + + def test_set_mock_data_bulk(self, robot): + """Test bulk data setter.""" + joint_pos = torch.randn(4, 12) + joint_vel = torch.randn(4, 12) + + robot.data.set_mock_data(joint_pos=joint_pos, joint_vel=joint_vel) + + assert torch.allclose(robot.data.joint_pos, joint_pos) + assert torch.allclose(robot.data.joint_vel, joint_vel) + + def test_find_joints(self): + """Test joint finding by regex.""" + joint_names = ["FL_hip", "FL_thigh", "FL_calf", "FR_hip", "FR_thigh", "FR_calf"] + robot = 
MockArticulation( + num_instances=1, + num_joints=6, + num_bodies=7, + joint_names=joint_names, + device="cpu", + ) + + # Find all hip joints + indices, names = robot.find_joints(".*_hip") + assert len(indices) == 2 + assert "FL_hip" in names + assert "FR_hip" in names + + # Find FL leg joints + indices, names = robot.find_joints("FL_.*") + assert len(indices) == 3 + + def test_find_bodies(self): + """Test body finding by regex.""" + body_names = ["base", "FL_hip", "FL_thigh", "FL_calf", "FR_hip", "FR_thigh", "FR_calf"] + robot = MockArticulation( + num_instances=1, + num_joints=6, + num_bodies=7, + body_names=body_names, + device="cpu", + ) + + # Find base + indices, names = robot.find_bodies("base") + assert indices == [0] + + # Find all thigh bodies + indices, names = robot.find_bodies(".*_thigh") + assert len(indices) == 2 + + def test_set_joint_position_target(self, robot): + """Test setting joint position targets.""" + target = torch.randn(4, 12) + robot.set_joint_position_target(target) + assert torch.allclose(robot.data.joint_pos_target, target) + + def test_joint_limits(self, robot): + """Test joint limits.""" + limits = robot.data.joint_pos_limits + assert limits.shape == (4, 12, 2) + # Default limits should be -inf to inf + assert torch.all(limits[..., 0] == float("-inf")) + assert torch.all(limits[..., 1] == float("inf")) + + +# ============================================================================== +# MockRigidObject Tests +# ============================================================================== + + +class TestMockRigidObject: + """Tests for MockRigidObject and MockRigidObjectData.""" + + @pytest.fixture + def obj(self): + """Create a mock rigid object fixture.""" + return MockRigidObject(num_instances=4, device="cpu") + + def test_initialization(self, obj): + """Test that MockRigidObject initializes correctly.""" + assert obj.num_instances == 4 + assert obj.num_bodies == 1 # Always 1 for rigid object + assert obj.root_view is None + + def test_root_state_shapes(self, obj): + """Test root state tensor shapes.""" + assert obj.data.root_link_pose_w.shape == (4, 7) + assert obj.data.root_link_vel_w.shape == (4, 6) + assert obj.data.root_link_state_w.shape == (4, 13) + + def test_body_state_shapes(self, obj): + """Test body state tensor shapes (single body).""" + assert obj.data.body_link_pose_w.shape == (4, 1, 7) + assert obj.data.body_link_vel_w.shape == (4, 1, 6) + + def test_body_properties(self, obj): + """Test body property shapes.""" + assert obj.data.body_mass.shape == (4, 1, 1) + assert obj.data.body_inertia.shape == (4, 1, 9) + + +# ============================================================================== +# MockRigidObjectCollection Tests +# ============================================================================== + + +class TestMockRigidObjectCollection: + """Tests for MockRigidObjectCollection and MockRigidObjectCollectionData.""" + + @pytest.fixture + def collection(self): + """Create a mock rigid object collection fixture.""" + return MockRigidObjectCollection( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + def test_initialization(self, collection): + """Test that MockRigidObjectCollection initializes correctly.""" + assert collection.num_instances == 4 + assert collection.num_bodies == 5 + + def test_body_state_shapes(self, collection): + """Test body state tensor shapes.""" + assert collection.data.body_link_pose_w.shape == (4, 5, 7) + assert collection.data.body_link_vel_w.shape == (4, 5, 6) + assert 
collection.data.body_link_state_w.shape == (4, 5, 13) + + def test_find_bodies_returns_mask(self, collection): + """Test that find_bodies returns a mask tensor.""" + body_mask, names, indices = collection.find_bodies("body_0") + assert isinstance(body_mask, torch.Tensor) + assert body_mask.shape == (5,) + assert body_mask[0] + + +# ============================================================================== +# Factory Function Tests +# ============================================================================== + + +class TestAssetFactories: + """Tests for asset factory functions.""" + + def test_create_mock_quadruped(self): + """Test quadruped factory function.""" + robot = create_mock_quadruped(num_instances=4) + assert robot.num_instances == 4 + assert robot.num_joints == 12 + assert robot.num_bodies == 13 + assert "FL_hip" in robot.joint_names + assert "base" in robot.body_names + + def test_create_mock_humanoid(self): + """Test humanoid factory function.""" + robot = create_mock_humanoid(num_instances=2) + assert robot.num_instances == 2 + assert robot.num_joints == 21 + + def test_create_mock_articulation(self): + """Test generic articulation factory function.""" + robot = create_mock_articulation( + num_instances=2, + num_joints=6, + num_bodies=7, + is_fixed_base=True, + ) + assert robot.num_instances == 2 + assert robot.num_joints == 6 + assert robot.is_fixed_base + + def test_create_mock_rigid_object(self): + """Test rigid object factory function.""" + obj = create_mock_rigid_object(num_instances=3) + assert obj.num_instances == 3 + assert obj.num_bodies == 1 + assert obj.data.root_link_pose_w.shape == (3, 7) + + def test_create_mock_rigid_object_collection(self): + """Test rigid object collection factory function.""" + collection = create_mock_rigid_object_collection( + num_instances=4, + num_bodies=6, + body_names=["obj_0", "obj_1", "obj_2", "obj_3", "obj_4", "obj_5"], + ) + assert collection.num_instances == 4 + assert collection.num_bodies == 6 + assert collection.body_names == ["obj_0", "obj_1", "obj_2", "obj_3", "obj_4", "obj_5"] + assert collection.data.body_link_pose_w.shape == (4, 6, 7) + + +# ============================================================================== +# MockArticulationBuilder Tests +# ============================================================================== + + +class TestMockArticulationBuilder: + """Tests for MockArticulationBuilder.""" + + def test_basic_build(self): + """Test building a basic articulation.""" + robot = ( + MockArticulationBuilder() + .with_num_instances(4) + .with_joints(["joint_0", "joint_1", "joint_2"]) + .with_bodies(["base", "link_1", "link_2", "link_3"]) + .build() + ) + + assert robot.num_instances == 4 + assert robot.num_joints == 3 + assert robot.num_bodies == 4 + + def test_with_default_positions(self): + """Test setting default joint positions.""" + default_pos = [0.0, 0.5, -0.5] + robot = ( + MockArticulationBuilder() + .with_num_instances(2) + .with_joints(["j0", "j1", "j2"], default_pos=default_pos) + .build() + ) + + expected = torch.tensor([default_pos, default_pos]) + assert torch.allclose(robot.data.joint_pos, expected) + + def test_with_joint_limits(self): + """Test setting joint limits.""" + robot = ( + MockArticulationBuilder() + .with_num_instances(1) + .with_joints(["j0", "j1"]) + .with_joint_limits(-1.0, 1.0) + .build() + ) + + limits = robot.data.joint_pos_limits + assert torch.all(limits[..., 0] == -1.0) + assert torch.all(limits[..., 1] == 1.0) diff --git 
a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py new file mode 100644 index 00000000000..c76dadbbbb2 --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py @@ -0,0 +1,842 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Comprehensive tests for mock data properties - shapes, setters, and device handling.""" + +import pytest +import torch + +from isaaclab.test.mock_interfaces.assets import ( + MockArticulation, + MockArticulationData, + MockRigidObject, + MockRigidObjectData, + MockRigidObjectCollection, + MockRigidObjectCollectionData, +) +from isaaclab.test.mock_interfaces.sensors import ( + MockImu, + MockImuData, + MockContactSensor, + MockContactSensorData, + MockFrameTransformer, + MockFrameTransformerData, +) + + +# ============================================================================== +# IMU Data Property Tests +# ============================================================================== + + +class TestMockImuDataProperties: + """Comprehensive tests for all MockImuData properties.""" + + @pytest.fixture + def data(self): + """Create MockImuData fixture.""" + return MockImuData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("pos_w", (4, 3)), + ("quat_w", (4, 4)), + ("pose_w", (4, 7)), + ("projected_gravity_b", (4, 3)), + ("lin_vel_b", (4, 3)), + ("ang_vel_b", (4, 3)), + ("lin_acc_b", (4, 3)), + ("ang_acc_b", (4, 3)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_pos_w", "pos_w", (4, 3)), + ("set_quat_w", "quat_w", (4, 4)), + ("set_projected_gravity_b", "projected_gravity_b", (4, 3)), + ("set_lin_vel_b", "lin_vel_b", (4, 3)), + ("set_ang_vel_b", "ang_vel_b", (4, 3)), + ("set_lin_acc_b", "lin_acc_b", (4, 3)), + ("set_ang_acc_b", "ang_acc_b", (4, 3)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name) + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_bulk_setter(self, data): + """Test that set_mock_data updates multiple properties at once.""" + lin_vel = torch.randn(4, 3) + ang_vel = torch.randn(4, 3) + lin_acc = torch.randn(4, 3) + + data.set_mock_data(lin_vel_b=lin_vel, ang_vel_b=ang_vel, lin_acc_b=lin_acc) + + assert torch.allclose(data.lin_vel_b, lin_vel) + assert torch.allclose(data.ang_vel_b, ang_vel) + assert torch.allclose(data.lin_acc_b, lin_acc) + + def test_default_quaternion_is_identity(self, data): + """Test that default quaternion is identity (w=1, x=y=z=0).""" + quat = data.quat_w + assert torch.allclose(quat[:, 0], torch.ones(4)) # w=1 + assert torch.allclose(quat[:, 1:], torch.zeros(4, 3)) # xyz=0 + + def test_default_gravity_points_down(self, data): + """Test that default gravity points in -z direction.""" + gravity = data.projected_gravity_b + expected = torch.tensor([[0, 0, -1]] * 4, 
dtype=torch.float32) + assert torch.allclose(gravity, expected) + + +# ============================================================================== +# Contact Sensor Data Property Tests +# ============================================================================== + + +class TestMockContactSensorDataProperties: + """Comprehensive tests for all MockContactSensorData properties.""" + + @pytest.fixture + def data(self): + """Create MockContactSensorData fixture.""" + return MockContactSensorData( + num_instances=4, + num_bodies=3, + device="cpu", + history_length=2, + num_filter_bodies=5, + ) + + @pytest.fixture + def data_no_history(self): + """Create MockContactSensorData without history.""" + return MockContactSensorData( + num_instances=4, + num_bodies=3, + device="cpu", + history_length=0, + num_filter_bodies=0, + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("pos_w", (4, 3, 3)), + ("quat_w", (4, 3, 4)), + ("pose_w", (4, 3, 7)), + ("net_forces_w", (4, 3, 3)), + ("last_air_time", (4, 3)), + ("current_air_time", (4, 3)), + ("last_contact_time", (4, 3)), + ("current_contact_time", (4, 3)), + ], + ) + def test_basic_property_shapes(self, data, property_name, expected_shape): + """Test basic properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("net_forces_w_history", (4, 2, 3, 3)), # (N, T, B, 3) + ("force_matrix_w", (4, 3, 5, 3)), # (N, B, M, 3) + ("force_matrix_w_history", (4, 2, 3, 5, 3)), # (N, T, B, M, 3) + ("contact_pos_w", (4, 3, 5, 3)), # (N, B, M, 3) + ("friction_forces_w", (4, 3, 5, 3)), # (N, B, M, 3) + ], + ) + def test_optional_property_shapes_with_history(self, data, property_name, expected_shape): + """Test optional properties with history/filter enabled.""" + prop = getattr(data, property_name) + assert prop is not None + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + def test_optional_properties_none_without_config(self, data_no_history): + """Test optional properties are None when not configured.""" + assert data_no_history.net_forces_w_history is None + assert data_no_history.force_matrix_w is None + assert data_no_history.force_matrix_w_history is None + assert data_no_history.contact_pos_w is None + assert data_no_history.friction_forces_w is None + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_pos_w", "pos_w", (4, 3, 3)), + ("set_quat_w", "quat_w", (4, 3, 4)), + ("set_net_forces_w", "net_forces_w", (4, 3, 3)), + ("set_last_air_time", "last_air_time", (4, 3)), + ("set_current_air_time", "current_air_time", (4, 3)), + ("set_last_contact_time", "last_contact_time", (4, 3)), + ("set_current_contact_time", "current_contact_time", (4, 3)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name) + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + +# ============================================================================== +# Frame Transformer Data Property Tests +# ============================================================================== + + +class TestMockFrameTransformerDataProperties: + """Comprehensive tests for 
all MockFrameTransformerData properties.""" + + @pytest.fixture + def data(self): + """Create MockFrameTransformerData fixture.""" + return MockFrameTransformerData( + num_instances=4, + num_target_frames=3, + target_frame_names=["frame_0", "frame_1", "frame_2"], + device="cpu", + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("source_pos_w", (4, 3)), + ("source_quat_w", (4, 4)), + ("source_pose_w", (4, 7)), + ("target_pos_w", (4, 3, 3)), + ("target_quat_w", (4, 3, 4)), + ("target_pose_w", (4, 3, 7)), + ("target_pos_source", (4, 3, 3)), + ("target_quat_source", (4, 3, 4)), + ("target_pose_source", (4, 3, 7)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_source_pos_w", "source_pos_w", (4, 3)), + ("set_source_quat_w", "source_quat_w", (4, 4)), + ("set_target_pos_w", "target_pos_w", (4, 3, 3)), + ("set_target_quat_w", "target_quat_w", (4, 3, 4)), + ("set_target_pos_source", "target_pos_source", (4, 3, 3)), + ("set_target_quat_source", "target_quat_source", (4, 3, 4)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name) + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_target_frame_names(self, data): + """Test that target frame names are correctly stored.""" + assert data.target_frame_names == ["frame_0", "frame_1", "frame_2"] + + +# ============================================================================== +# Articulation Data Property Tests +# ============================================================================== + + +class TestMockArticulationDataProperties: + """Comprehensive tests for all MockArticulationData properties.""" + + @pytest.fixture + def data(self): + """Create MockArticulationData fixture.""" + return MockArticulationData( + num_instances=4, + num_joints=12, + num_bodies=13, + num_fixed_tendons=2, + num_spatial_tendons=1, + device="cpu", + ) + + # -- Joint State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("joint_pos", (4, 12)), + ("joint_vel", (4, 12)), + ("joint_acc", (4, 12)), + ("joint_pos_target", (4, 12)), + ("joint_vel_target", (4, 12)), + ("joint_effort_target", (4, 12)), + ("computed_torque", (4, 12)), + ("applied_torque", (4, 12)), + ], + ) + def test_joint_state_shapes(self, data, property_name, expected_shape): + """Test joint state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Joint Property Shapes -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("joint_stiffness", (4, 12)), + ("joint_damping", (4, 12)), + ("joint_armature", (4, 12)), + ("joint_friction_coeff", (4, 12)), + ("joint_dynamic_friction_coeff", (4, 12)), + ("joint_viscous_friction_coeff", (4, 12)), + ("joint_pos_limits", (4, 12, 2)), + ("joint_vel_limits", (4, 12)), + ("joint_effort_limits", (4, 12)), + ("soft_joint_pos_limits", (4, 12, 2)), + ("soft_joint_vel_limits", (4, 12)), + ("gear_ratio", (4, 12)), + ], 
+ ) + def test_joint_property_shapes(self, data, property_name, expected_shape): + """Test joint property shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Root State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("root_link_pose_w", (4, 7)), + ("root_link_vel_w", (4, 6)), + ("root_link_state_w", (4, 13)), + ("root_link_pos_w", (4, 3)), + ("root_link_quat_w", (4, 4)), + ("root_link_lin_vel_w", (4, 3)), + ("root_link_ang_vel_w", (4, 3)), + ("root_com_pose_w", (4, 7)), + ("root_com_vel_w", (4, 6)), + ("root_com_state_w", (4, 13)), + ("root_com_pos_w", (4, 3)), + ("root_com_quat_w", (4, 4)), + ("root_com_lin_vel_w", (4, 3)), + ("root_com_ang_vel_w", (4, 3)), + ("root_state_w", (4, 13)), + ], + ) + def test_root_state_shapes(self, data, property_name, expected_shape): + """Test root state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Body State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_link_pose_w", (4, 13, 7)), + ("body_link_vel_w", (4, 13, 6)), + ("body_link_state_w", (4, 13, 13)), + ("body_link_pos_w", (4, 13, 3)), + ("body_link_quat_w", (4, 13, 4)), + ("body_link_lin_vel_w", (4, 13, 3)), + ("body_link_ang_vel_w", (4, 13, 3)), + ("body_com_pose_w", (4, 13, 7)), + ("body_com_vel_w", (4, 13, 6)), + ("body_com_state_w", (4, 13, 13)), + ("body_com_acc_w", (4, 13, 6)), + ("body_com_pos_w", (4, 13, 3)), + ("body_com_quat_w", (4, 13, 4)), + ("body_com_lin_vel_w", (4, 13, 3)), + ("body_com_ang_vel_w", (4, 13, 3)), + ("body_com_lin_acc_w", (4, 13, 3)), + ("body_com_ang_acc_w", (4, 13, 3)), + ], + ) + def test_body_state_shapes(self, data, property_name, expected_shape): + """Test body state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Body Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_mass", (4, 13)), + ("body_inertia", (4, 13, 3, 3)), + ("body_incoming_joint_wrench_b", (4, 13, 6)), + ], + ) + def test_body_property_shapes(self, data, property_name, expected_shape): + """Test body property shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Default State Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("default_root_pose", (4, 7)), + ("default_root_vel", (4, 6)), + ("default_root_state", (4, 13)), + ("default_joint_pos", (4, 12)), + ("default_joint_vel", (4, 12)), + ], + ) + def test_default_state_shapes(self, data, property_name, expected_shape): + """Test default state properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Derived Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("projected_gravity_b", (4, 3)), + ("heading_w", (4,)), + ("root_link_lin_vel_b", (4, 3)), + ("root_link_ang_vel_b", (4, 3)), + ("root_com_lin_vel_b", (4, 3)), + ("root_com_ang_vel_b", (4, 3)), + ], + ) + def test_derived_property_shapes(self, data, property_name, expected_shape): + """Test derived properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Tendon 
Properties -- + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("fixed_tendon_stiffness", (4, 2)), + ("fixed_tendon_damping", (4, 2)), + ("fixed_tendon_limit_stiffness", (4, 2)), + ("fixed_tendon_rest_length", (4, 2)), + ("fixed_tendon_offset", (4, 2)), + ("fixed_tendon_pos_limits", (4, 2, 2)), + ("spatial_tendon_stiffness", (4, 1)), + ("spatial_tendon_damping", (4, 1)), + ("spatial_tendon_limit_stiffness", (4, 1)), + ("spatial_tendon_offset", (4, 1)), + ], + ) + def test_tendon_property_shapes(self, data, property_name, expected_shape): + """Test tendon properties have correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + # -- Setter Tests -- + @pytest.mark.parametrize( + "setter_name,property_name,shape", + [ + ("set_joint_pos", "joint_pos", (4, 12)), + ("set_joint_vel", "joint_vel", (4, 12)), + ("set_joint_acc", "joint_acc", (4, 12)), + ("set_root_link_pose_w", "root_link_pose_w", (4, 7)), + ("set_root_link_vel_w", "root_link_vel_w", (4, 6)), + ("set_body_link_pose_w", "body_link_pose_w", (4, 13, 7)), + ("set_body_link_vel_w", "body_link_vel_w", (4, 13, 6)), + ("set_body_mass", "body_mass", (4, 13)), + ], + ) + def test_setters_update_properties(self, data, setter_name, property_name, shape): + """Test that setters properly update the corresponding properties.""" + test_value = torch.randn(shape) + setter = getattr(data, setter_name) + setter(test_value) + result = getattr(data, property_name) + assert torch.allclose(result, test_value), f"{setter_name} did not update {property_name}" + + def test_bulk_setter_with_multiple_properties(self, data): + """Test set_mock_data with multiple properties.""" + joint_pos = torch.randn(4, 12) + joint_vel = torch.randn(4, 12) + root_pose = torch.randn(4, 7) + + data.set_mock_data( + joint_pos=joint_pos, + joint_vel=joint_vel, + root_link_pose_w=root_pose, + ) + + assert torch.allclose(data.joint_pos, joint_pos) + assert torch.allclose(data.joint_vel, joint_vel) + assert torch.allclose(data.root_link_pose_w, root_pose) + + def test_bulk_setter_unknown_property_raises(self, data): + """Test that set_mock_data raises on unknown property.""" + with pytest.raises(ValueError, match="Unknown property"): + data.set_mock_data(unknown_property=torch.zeros(4)) + + +# ============================================================================== +# Rigid Object Data Property Tests +# ============================================================================== + + +class TestMockRigidObjectDataProperties: + """Comprehensive tests for MockRigidObjectData properties.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectData fixture.""" + return MockRigidObjectData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("root_link_pose_w", (4, 7)), + ("root_link_vel_w", (4, 6)), + ("root_link_state_w", (4, 13)), + ("root_link_pos_w", (4, 3)), + ("root_link_quat_w", (4, 4)), + ("root_link_lin_vel_w", (4, 3)), + ("root_link_ang_vel_w", (4, 3)), + ("root_com_pose_w", (4, 7)), + ("root_com_vel_w", (4, 6)), + ("root_com_state_w", (4, 13)), + ("root_state_w", (4, 13)), + ("body_link_pose_w", (4, 1, 7)), + ("body_link_vel_w", (4, 1, 6)), + ("body_link_state_w", (4, 1, 13)), + ("body_com_pose_w", (4, 1, 7)), + ("body_com_vel_w", (4, 1, 6)), + ("body_com_state_w", (4, 1, 13)), + ("body_com_acc_w", (4, 1, 6)), + ("body_mass", (4, 1, 1)), + ("body_inertia", (4, 1, 9)), + ("projected_gravity_b", (4, 3)), + 
("heading_w", (4,)), + ("default_root_pose", (4, 7)), + ("default_root_vel", (4, 6)), + ("default_root_state", (4, 13)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + +# ============================================================================== +# Rigid Object Collection Data Property Tests +# ============================================================================== + + +class TestMockRigidObjectCollectionDataProperties: + """Comprehensive tests for MockRigidObjectCollectionData properties.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectCollectionData fixture.""" + return MockRigidObjectCollectionData( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + @pytest.mark.parametrize( + "property_name,expected_shape", + [ + ("body_link_pose_w", (4, 5, 7)), + ("body_link_vel_w", (4, 5, 6)), + ("body_link_state_w", (4, 5, 13)), + ("body_link_pos_w", (4, 5, 3)), + ("body_link_quat_w", (4, 5, 4)), + ("body_link_lin_vel_w", (4, 5, 3)), + ("body_link_ang_vel_w", (4, 5, 3)), + ("body_link_lin_vel_b", (4, 5, 3)), + ("body_link_ang_vel_b", (4, 5, 3)), + ("body_com_pose_w", (4, 5, 7)), + ("body_com_vel_w", (4, 5, 6)), + ("body_com_state_w", (4, 5, 13)), + ("body_com_acc_w", (4, 5, 6)), + ("body_com_pos_w", (4, 5, 3)), + ("body_com_quat_w", (4, 5, 4)), + ("body_com_lin_vel_w", (4, 5, 3)), + ("body_com_ang_vel_w", (4, 5, 3)), + ("body_com_lin_vel_b", (4, 5, 3)), + ("body_com_ang_vel_b", (4, 5, 3)), + ("body_com_lin_acc_w", (4, 5, 3)), + ("body_com_ang_acc_w", (4, 5, 3)), + ("body_com_pose_b", (4, 5, 7)), + ("body_com_pos_b", (4, 5, 3)), + ("body_com_quat_b", (4, 5, 4)), + ("body_mass", (4, 5)), + ("body_inertia", (4, 5, 9)), + ("projected_gravity_b", (4, 5, 3)), + ("heading_w", (4, 5)), + ("default_body_pose", (4, 5, 7)), + ("default_body_vel", (4, 5, 6)), + ("default_body_state", (4, 5, 13)), + ], + ) + def test_property_shapes(self, data, property_name, expected_shape): + """Test that all properties return tensors with correct shapes.""" + prop = getattr(data, property_name) + assert prop.shape == expected_shape, f"{property_name} has wrong shape" + + +# ============================================================================== +# Device Handling Tests +# ============================================================================== + + +class TestDeviceHandling: + """Test that tensors are created on the correct device.""" + + def test_imu_data_device(self): + """Test IMU data tensors are on correct device.""" + data = MockImuData(num_instances=2, device="cpu") + assert data.pos_w.device.type == "cpu" + assert data.quat_w.device.type == "cpu" + + def test_contact_sensor_data_device(self): + """Test contact sensor data tensors are on correct device.""" + data = MockContactSensorData(num_instances=2, num_bodies=3, device="cpu") + assert data.net_forces_w.device.type == "cpu" + + def test_articulation_data_device(self): + """Test articulation data tensors are on correct device.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + assert data.joint_pos.device.type == "cpu" + assert data.root_link_pose_w.device.type == "cpu" + + def test_setter_moves_tensor_to_device(self): + """Test that setters move tensors to the correct device.""" + data = MockImuData(num_instances=2, device="cpu") + # Create tensor on CPU 
(default) + test_tensor = torch.randn(2, 3) + data.set_pos_w(test_tensor) + assert data.pos_w.device.type == "cpu" + + +# ============================================================================== +# Composite Property Tests +# ============================================================================== + + +class TestCompositeProperties: + """Test that composite properties are correctly composed from components.""" + + def test_imu_pose_composition(self): + """Test IMU pose_w is correctly composed from pos_w and quat_w.""" + data = MockImuData(num_instances=2, device="cpu") + pos = torch.randn(2, 3) + quat = torch.tensor([[1, 0, 0, 0], [0.707, 0.707, 0, 0]], dtype=torch.float32) + + data.set_pos_w(pos) + data.set_quat_w(quat) + + pose = data.pose_w + assert torch.allclose(pose[:, :3], pos) + assert torch.allclose(pose[:, 3:], quat) + + def test_articulation_root_state_composition(self): + """Test articulation root_link_state_w is correctly composed.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + pose = torch.randn(2, 7) + vel = torch.randn(2, 6) + + data.set_root_link_pose_w(pose) + data.set_root_link_vel_w(vel) + + state = data.root_link_state_w + assert torch.allclose(state[:, :7], pose) + assert torch.allclose(state[:, 7:], vel) + + def test_articulation_default_state_composition(self): + """Test articulation default_root_state is correctly composed.""" + data = MockArticulationData(num_instances=2, num_joints=6, num_bodies=7, device="cpu") + pose = torch.randn(2, 7) + vel = torch.randn(2, 6) + + data.set_default_root_pose(pose) + data.set_default_root_vel(vel) + + state = data.default_root_state + assert torch.allclose(state[:, :7], pose) + assert torch.allclose(state[:, 7:], vel) + + +# ============================================================================== +# Convenience Alias Property Tests +# ============================================================================== + + +class TestArticulationConvenienceAliases: + """Test convenience alias properties for MockArticulationData.""" + + @pytest.fixture + def data(self): + """Create MockArticulationData fixture.""" + return MockArticulationData( + num_instances=4, + num_joints=12, + num_bodies=13, + num_fixed_tendons=2, + device="cpu", + ) + + # -- Root state aliases (without _link_ or _com_ prefix) -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("root_pos_w", "root_link_pos_w", (4, 3)), + ("root_quat_w", "root_link_quat_w", (4, 4)), + ("root_pose_w", "root_link_pose_w", (4, 7)), + ("root_vel_w", "root_com_vel_w", (4, 6)), + ("root_lin_vel_w", "root_com_lin_vel_w", (4, 3)), + ("root_ang_vel_w", "root_com_ang_vel_w", (4, 3)), + ("root_lin_vel_b", "root_com_lin_vel_b", (4, 3)), + ("root_ang_vel_b", "root_com_ang_vel_b", (4, 3)), + ], + ) + def test_root_aliases(self, data, alias, source, expected_shape): + """Test root state aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.shape == expected_shape, f"{alias} has wrong shape" + assert torch.equal(alias_prop, source_prop), f"{alias} should equal {source}" + + # -- Body state aliases (without _link_ or _com_ prefix) -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("body_pos_w", "body_link_pos_w", (4, 13, 3)), + ("body_quat_w", "body_link_quat_w", (4, 13, 4)), + ("body_pose_w", "body_link_pose_w", (4, 13, 7)), + ("body_vel_w", "body_com_vel_w", (4, 13, 6)), + ("body_state_w", "body_com_state_w", (4, 
13, 13)), + ("body_lin_vel_w", "body_com_lin_vel_w", (4, 13, 3)), + ("body_ang_vel_w", "body_com_ang_vel_w", (4, 13, 3)), + ("body_acc_w", "body_com_acc_w", (4, 13, 6)), + ("body_lin_acc_w", "body_com_lin_acc_w", (4, 13, 3)), + ("body_ang_acc_w", "body_com_ang_acc_w", (4, 13, 3)), + ], + ) + def test_body_aliases(self, data, alias, source, expected_shape): + """Test body state aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.shape == expected_shape, f"{alias} has wrong shape" + assert torch.equal(alias_prop, source_prop), f"{alias} should equal {source}" + + # -- CoM in body frame -- + @pytest.mark.parametrize( + "alias,expected_shape", + [ + ("com_pos_b", (4, 3)), + ("com_quat_b", (4, 4)), + ], + ) + def test_com_body_frame_aliases(self, data, alias, expected_shape): + """Test CoM in body frame aliases.""" + prop = getattr(data, alias) + assert prop.shape == expected_shape, f"{alias} has wrong shape" + + # -- Joint property aliases -- + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("joint_limits", "joint_pos_limits", (4, 12, 2)), + ("joint_velocity_limits", "joint_vel_limits", (4, 12)), + ("joint_friction", "joint_friction_coeff", (4, 12)), + ], + ) + def test_joint_aliases(self, data, alias, source, expected_shape): + """Test joint property aliases.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.shape == expected_shape, f"{alias} has wrong shape" + assert torch.equal(alias_prop, source_prop), f"{alias} should equal {source}" + + # -- Fixed tendon alias -- + def test_fixed_tendon_limit_alias(self, data): + """Test fixed_tendon_limit is alias for fixed_tendon_pos_limits.""" + assert data.fixed_tendon_limit.shape == (4, 2, 2) + assert torch.equal(data.fixed_tendon_limit, data.fixed_tendon_pos_limits) + + +class TestRigidObjectConvenienceAliases: + """Test convenience alias properties for MockRigidObjectData.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectData fixture.""" + return MockRigidObjectData(num_instances=4, device="cpu") + + @pytest.mark.parametrize( + "alias,source,expected_shape", + [ + ("root_pos_w", "root_link_pos_w", (4, 3)), + ("root_quat_w", "root_link_quat_w", (4, 4)), + ("root_pose_w", "root_link_pose_w", (4, 7)), + ("root_vel_w", "root_com_vel_w", (4, 6)), + ("root_lin_vel_w", "root_com_lin_vel_w", (4, 3)), + ("root_ang_vel_w", "root_com_ang_vel_w", (4, 3)), + ("root_lin_vel_b", "root_com_lin_vel_b", (4, 3)), + ("root_ang_vel_b", "root_com_ang_vel_b", (4, 3)), + ("body_pos_w", "body_link_pos_w", (4, 1, 3)), + ("body_quat_w", "body_link_quat_w", (4, 1, 4)), + ("body_pose_w", "body_link_pose_w", (4, 1, 7)), + ("body_vel_w", "body_com_vel_w", (4, 1, 6)), + ("body_state_w", "body_com_state_w", (4, 1, 13)), + ("body_lin_vel_w", "body_com_lin_vel_w", (4, 1, 3)), + ("body_ang_vel_w", "body_com_ang_vel_w", (4, 1, 3)), + ("body_acc_w", "body_com_acc_w", (4, 1, 6)), + ("body_lin_acc_w", "body_com_lin_acc_w", (4, 1, 3)), + ("body_ang_acc_w", "body_com_ang_acc_w", (4, 1, 3)), + ], + ) + def test_aliases(self, data, alias, source, expected_shape): + """Test convenience aliases reference correct properties.""" + alias_prop = getattr(data, alias) + source_prop = getattr(data, source) + assert alias_prop.shape == expected_shape, f"{alias} has wrong shape" + assert torch.equal(alias_prop, source_prop), f"{alias} should equal {source}" + + @pytest.mark.parametrize( + "alias,expected_shape", + [ + ("com_pos_b", (4, 
3)), + ("com_quat_b", (4, 4)), + ], + ) + def test_com_body_frame_aliases(self, data, alias, expected_shape): + """Test CoM in body frame aliases.""" + prop = getattr(data, alias) + assert prop.shape == expected_shape, f"{alias} has wrong shape" + + +class TestRigidObjectCollectionConvenienceAliases: + """Test convenience alias properties for MockRigidObjectCollectionData.""" + + @pytest.fixture + def data(self): + """Create MockRigidObjectCollectionData fixture.""" + return MockRigidObjectCollectionData( + num_instances=4, + num_bodies=5, + device="cpu", + ) + + def test_body_state_w_alias(self, data): + """Test body_state_w is alias for body_com_state_w.""" + assert data.body_state_w.shape == (4, 5, 13) + assert torch.equal(data.body_state_w, data.body_com_state_w) diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py new file mode 100644 index 00000000000..db8165130d3 --- /dev/null +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py @@ -0,0 +1,309 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Unit tests for mock sensor interfaces.""" + +import pytest +import torch + +from isaaclab.test.mock_interfaces.sensors import MockImu, MockContactSensor, MockFrameTransformer +from isaaclab.test.mock_interfaces.sensors import ( + create_mock_imu, + create_mock_contact_sensor, + create_mock_foot_contact_sensor, + create_mock_frame_transformer, +) +from isaaclab.test.mock_interfaces.utils import MockSensorBuilder + + +# ============================================================================== +# MockImu Tests +# ============================================================================== + + +class TestMockImu: + """Tests for MockImu and MockImuData.""" + + @pytest.fixture + def imu(self): + """Create a mock IMU fixture.""" + return MockImu(num_instances=4, device="cpu") + + def test_initialization(self, imu): + """Test that MockImu initializes correctly.""" + assert imu.num_instances == 4 + assert imu.device == "cpu" + assert imu.data is not None + + def test_lazy_tensor_initialization(self, imu): + """Test that unset properties return zero tensors with correct shapes.""" + # Position + pos = imu.data.pos_w + assert pos.shape == (4, 3) + assert torch.all(pos == 0) + + # Quaternion (should be identity: w=1, x=0, y=0, z=0) + quat = imu.data.quat_w + assert quat.shape == (4, 4) + assert torch.all(quat[:, 0] == 1) # w component + assert torch.all(quat[:, 1:] == 0) # xyz components + + # Velocities and accelerations + assert imu.data.lin_vel_b.shape == (4, 3) + assert imu.data.ang_vel_b.shape == (4, 3) + assert imu.data.lin_acc_b.shape == (4, 3) + assert imu.data.ang_acc_b.shape == (4, 3) + + def test_projected_gravity_default(self, imu): + """Test default gravity direction.""" + gravity = imu.data.projected_gravity_b + assert gravity.shape == (4, 3) + # Default gravity should point down: (0, 0, -1) + assert torch.all(gravity[:, 2] == -1) + + def test_set_mock_data(self, imu): + """Test bulk data setter.""" + lin_vel = torch.randn(4, 3) + ang_vel = torch.randn(4, 3) + + imu.data.set_mock_data(lin_vel_b=lin_vel, ang_vel_b=ang_vel) + + assert torch.allclose(imu.data.lin_vel_b, lin_vel) + assert torch.allclose(imu.data.ang_vel_b, ang_vel) + + def test_per_property_setter(self, imu): + """Test individual property setters.""" + lin_acc = torch.randn(4, 3) + imu.data.set_lin_acc_b(lin_acc) + assert 
torch.allclose(imu.data.lin_acc_b, lin_acc) + + def test_pose_composition(self, imu): + """Test that pose_w combines pos_w and quat_w correctly.""" + pos = torch.randn(4, 3) + quat = torch.tensor([[1, 0, 0, 0]] * 4, dtype=torch.float32) + + imu.data.set_pos_w(pos) + imu.data.set_quat_w(quat) + + pose = imu.data.pose_w + assert pose.shape == (4, 7) + assert torch.allclose(pose[:, :3], pos) + assert torch.allclose(pose[:, 3:], quat) + + +# ============================================================================== +# MockContactSensor Tests +# ============================================================================== + + +class TestMockContactSensor: + """Tests for MockContactSensor and MockContactSensorData.""" + + @pytest.fixture + def sensor(self): + """Create a mock contact sensor fixture.""" + return MockContactSensor( + num_instances=4, + num_bodies=4, + body_names=["FL_foot", "FR_foot", "RL_foot", "RR_foot"], + device="cpu", + ) + + def test_initialization(self, sensor): + """Test that MockContactSensor initializes correctly.""" + assert sensor.num_instances == 4 + assert sensor.num_bodies == 4 + assert sensor.body_names == ["FL_foot", "FR_foot", "RL_foot", "RR_foot"] + assert sensor.contact_view is None + + def test_lazy_tensor_shapes(self, sensor): + """Test that unset properties return tensors with correct shapes.""" + forces = sensor.data.net_forces_w + assert forces.shape == (4, 4, 3) + + contact_time = sensor.data.current_contact_time + assert contact_time.shape == (4, 4) + + air_time = sensor.data.current_air_time + assert air_time.shape == (4, 4) + + def test_find_bodies(self, sensor): + """Test body finding by regex.""" + # Find all bodies matching ".*_foot" + indices, names = sensor.find_bodies(".*_foot") + assert len(indices) == 4 + assert set(names) == {"FL_foot", "FR_foot", "RL_foot", "RR_foot"} + + # Find specific body + indices, names = sensor.find_bodies("FL_foot") + assert indices == [0] + assert names == ["FL_foot"] + + # Find front feet + indices, names = sensor.find_bodies("F._foot") + assert len(indices) == 2 + assert "FL_foot" in names + assert "FR_foot" in names + + def test_compute_first_contact(self, sensor): + """Test first contact computation.""" + # Set contact time to 0.5 for all bodies + sensor.data.set_current_contact_time(torch.full((4, 4), 0.5)) + + # Check with dt=1.0 - should be True (0.5 < 1.0) + first_contact = sensor.compute_first_contact(dt=1.0) + assert torch.all(first_contact) + + # Check with dt=0.1 - should be False (0.5 > 0.1) + first_contact = sensor.compute_first_contact(dt=0.1) + assert torch.all(~first_contact) + + def test_compute_first_air(self, sensor): + """Test first air computation.""" + sensor.data.set_current_air_time(torch.full((4, 4), 0.2)) + + first_air = sensor.compute_first_air(dt=0.5) + assert torch.all(first_air) + + def test_history_buffer(self): + """Test history buffer when enabled.""" + sensor_with_history = MockContactSensor( + num_instances=2, + num_bodies=2, + history_length=3, + device="cpu", + ) + + history = sensor_with_history.data.net_forces_w_history + assert history is not None + assert history.shape == (2, 3, 2, 3) + + def test_no_history_buffer(self, sensor): + """Test that history buffer is None when not enabled.""" + history = sensor.data.net_forces_w_history + assert history is None + + +# ============================================================================== +# MockFrameTransformer Tests +# ============================================================================== + + +class 
TestMockFrameTransformer: + """Tests for MockFrameTransformer and MockFrameTransformerData.""" + + @pytest.fixture + def transformer(self): + """Create a mock frame transformer fixture.""" + return MockFrameTransformer( + num_instances=2, + num_target_frames=3, + target_frame_names=["target_1", "target_2", "target_3"], + device="cpu", + ) + + def test_initialization(self, transformer): + """Test that MockFrameTransformer initializes correctly.""" + assert transformer.num_instances == 2 + assert transformer.num_bodies == 3 + assert transformer.body_names == ["target_1", "target_2", "target_3"] + + def test_data_shapes(self, transformer): + """Test that data properties have correct shapes.""" + # Source frame + assert transformer.data.source_pos_w.shape == (2, 3) + assert transformer.data.source_quat_w.shape == (2, 4) + + # Target frames + assert transformer.data.target_pos_w.shape == (2, 3, 3) + assert transformer.data.target_quat_w.shape == (2, 3, 4) + + # Relative poses + assert transformer.data.target_pos_source.shape == (2, 3, 3) + assert transformer.data.target_quat_source.shape == (2, 3, 4) + + def test_pose_composition(self, transformer): + """Test that pose properties combine position and orientation correctly.""" + source_pose = transformer.data.source_pose_w + assert source_pose.shape == (2, 7) + + target_pose = transformer.data.target_pose_w + assert target_pose.shape == (2, 3, 7) + + def test_find_bodies(self, transformer): + """Test frame finding by regex.""" + indices, names = transformer.find_bodies("target_.*") + assert len(indices) == 3 + + indices, names = transformer.find_bodies("target_1") + assert indices == [0] + + +# ============================================================================== +# Factory Function Tests +# ============================================================================== + + +class TestSensorFactories: + """Tests for sensor factory functions.""" + + def test_create_mock_imu(self): + """Test IMU factory function.""" + imu = create_mock_imu(num_instances=4) + assert imu.num_instances == 4 + assert imu.data.projected_gravity_b.shape == (4, 3) + + def test_create_mock_foot_contact_sensor(self): + """Test foot contact sensor factory function.""" + sensor = create_mock_foot_contact_sensor(num_instances=4, num_feet=4) + assert sensor.num_instances == 4 + assert sensor.num_bodies == 4 + assert "FL_foot" in sensor.body_names + + def test_create_mock_frame_transformer(self): + """Test frame transformer factory function.""" + transformer = create_mock_frame_transformer(num_instances=2, num_target_frames=5) + assert transformer.num_instances == 2 + assert transformer.num_bodies == 5 + + +# ============================================================================== +# MockSensorBuilder Tests +# ============================================================================== + + +class TestMockSensorBuilder: + """Tests for MockSensorBuilder.""" + + def test_build_contact_sensor(self): + """Test building a contact sensor.""" + sensor = ( + MockSensorBuilder("contact") + .with_num_instances(4) + .with_bodies(["foot_1", "foot_2"]) + .with_history_length(3) + .build() + ) + + assert sensor.num_instances == 4 + assert sensor.num_bodies == 2 + assert sensor.data.net_forces_w_history.shape == (4, 3, 2, 3) + + def test_build_imu_sensor(self): + """Test building an IMU sensor.""" + sensor = MockSensorBuilder("imu").with_num_instances(2).build() + assert sensor.num_instances == 2 + + def test_build_frame_transformer(self): + """Test building a frame transformer 
sensor.""" + sensor = ( + MockSensorBuilder("frame_transformer") + .with_num_instances(3) + .with_target_frames(["target_1", "target_2"]) + .build() + ) + + assert sensor.num_instances == 3 + assert sensor.num_bodies == 2 diff --git a/source/isaaclab_physx/isaaclab_physx/test/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/__init__.py new file mode 100644 index 00000000000..684458ea7aa --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test utilities for isaaclab_physx.""" diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py new file mode 100644 index 00000000000..cffa253263c --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock interfaces for PhysX TensorAPI views. + +This module provides mock implementations of PhysX TensorAPI views for unit testing +without requiring Isaac Sim or GPU simulation. +""" + +from .factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, + create_mock_rigid_body_view, + create_mock_rigid_contact_view, +) +from .views import MockArticulationView, MockRigidBodyView, MockRigidContactView + +__all__ = [ + # Views + "MockRigidBodyView", + "MockArticulationView", + "MockRigidContactView", + # Factories + "create_mock_rigid_body_view", + "create_mock_articulation_view", + "create_mock_rigid_contact_view", + "create_mock_quadruped_view", + "create_mock_humanoid_view", +] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py new file mode 100644 index 00000000000..736171c9752 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py @@ -0,0 +1,240 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Factory functions for creating mock PhysX views.""" + +from __future__ import annotations + +from .views import MockArticulationView, MockRigidBodyView, MockRigidContactView + + +def create_mock_rigid_body_view( + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> MockRigidBodyView: + """Create a mock rigid body view. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A MockRigidBodyView instance. + """ + return MockRigidBodyView(count=count, prim_paths=prim_paths, device=device) + + +def create_mock_articulation_view( + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> MockArticulationView: + """Create a mock articulation view. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. 
+ prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A MockArticulationView instance. + """ + return MockArticulationView( + count=count, + num_dofs=num_dofs, + num_links=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + prim_paths=prim_paths, + device=device, + ) + + +def create_mock_rigid_contact_view( + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", +) -> MockRigidContactView: + """Create a mock rigid contact view. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + + Returns: + A MockRigidContactView instance. + """ + return MockRigidContactView( + count=count, + num_bodies=num_bodies, + filter_count=filter_count, + max_contact_data_count=max_contact_data_count, + device=device, + ) + + +# -- Pre-configured factories -- + + +def create_mock_quadruped_view( + count: int = 1, + device: str = "cpu", +) -> MockArticulationView: + """Create a mock articulation view configured for a quadruped robot. + + Configuration: + - 12 DOFs (3 per leg x 4 legs: hip, thigh, calf) + - 13 links (base + 3 per leg) + - Floating base + + Args: + count: Number of articulation instances. + device: Device for tensor allocation. + + Returns: + A MockArticulationView configured for quadruped. + """ + dof_names = [ + "FL_hip_joint", + "FL_thigh_joint", + "FL_calf_joint", + "FR_hip_joint", + "FR_thigh_joint", + "FR_calf_joint", + "RL_hip_joint", + "RL_thigh_joint", + "RL_calf_joint", + "RR_hip_joint", + "RR_thigh_joint", + "RR_calf_joint", + ] + link_names = [ + "base", + "FL_hip", + "FL_thigh", + "FL_calf", + "FR_hip", + "FR_thigh", + "FR_calf", + "RL_hip", + "RL_thigh", + "RL_calf", + "RR_hip", + "RR_thigh", + "RR_calf", + ] + return MockArticulationView( + count=count, + num_dofs=12, + num_links=13, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) + + +def create_mock_humanoid_view( + count: int = 1, + device: str = "cpu", +) -> MockArticulationView: + """Create a mock articulation view configured for a humanoid robot. + + Configuration: + - 21 DOFs (typical humanoid configuration) + - 22 links + - Floating base + + Args: + count: Number of articulation instances. + device: Device for tensor allocation. + + Returns: + A MockArticulationView configured for humanoid. 
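+
+    Example (a minimal doctest-style sketch; the shapes follow from the mock's
+    lazily-initialized defaults documented in MockArticulationView):
+        >>> view = create_mock_humanoid_view(count=2)
+        >>> view.get_dof_positions().shape
+        torch.Size([2, 21])
+        >>> view.shared_metatype.link_count
+        22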
+ """ + dof_names = [ + # Torso + "torso_joint", + # Left arm + "left_shoulder_pitch", + "left_shoulder_roll", + "left_shoulder_yaw", + "left_elbow", + # Right arm + "right_shoulder_pitch", + "right_shoulder_roll", + "right_shoulder_yaw", + "right_elbow", + # Left leg + "left_hip_yaw", + "left_hip_roll", + "left_hip_pitch", + "left_knee", + "left_ankle_pitch", + "left_ankle_roll", + # Right leg + "right_hip_yaw", + "right_hip_roll", + "right_hip_pitch", + "right_knee", + "right_ankle_pitch", + "right_ankle_roll", + ] + link_names = [ + "pelvis", + "torso", + # Left arm + "left_shoulder", + "left_upper_arm", + "left_lower_arm", + "left_hand", + # Right arm + "right_shoulder", + "right_upper_arm", + "right_lower_arm", + "right_hand", + # Left leg + "left_hip", + "left_upper_leg", + "left_lower_leg", + "left_ankle", + "left_foot", + # Right leg + "right_hip", + "right_upper_leg", + "right_lower_leg", + "right_ankle", + "right_foot", + # Head + "neck", + "head", + ] + return MockArticulationView( + count=count, + num_dofs=21, + num_links=22, + dof_names=dof_names, + link_names=link_names, + fixed_base=False, + device=device, + ) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py new file mode 100644 index 00000000000..4c52eb2e2ad --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for mock PhysX interfaces.""" + +from .mock_shared_metatype import MockSharedMetatype +from .patching import ( + mock_articulation_view, + mock_rigid_body_view, + mock_rigid_contact_view, + patch_articulation_view, + patch_rigid_body_view, + patch_rigid_contact_view, +) + +__all__ = [ + "MockSharedMetatype", + "patch_rigid_body_view", + "patch_articulation_view", + "patch_rigid_contact_view", + "mock_rigid_body_view", + "mock_articulation_view", + "mock_rigid_contact_view", +] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py new file mode 100644 index 00000000000..8b05ba96c51 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py @@ -0,0 +1,64 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX shared metatype.""" + +from __future__ import annotations + + +class MockSharedMetatype: + """Mock implementation of the shared metatype for articulation views. + + The shared metatype contains metadata about the articulation structure that is + shared across all instances, such as DOF count, link count, and names. + """ + + def __init__( + self, + dof_count: int = 1, + link_count: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + ): + """Initialize the mock shared metatype. + + Args: + dof_count: Number of degrees of freedom (joints). + link_count: Number of links (bodies). + dof_names: Names of the DOFs. Defaults to ["dof_0", "dof_1", ...]. + link_names: Names of the links. Defaults to ["link_0", "link_1", ...]. + fixed_base: Whether the articulation has a fixed base. 
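+
+        Example (illustrative only; relies solely on the default name generation
+        described above):
+            >>> meta = MockSharedMetatype(dof_count=2, link_count=3)
+            >>> meta.dof_names
+            ['dof_0', 'dof_1']
+            >>> meta.link_names
+            ['link_0', 'link_1', 'link_2']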
+ """ + self._dof_count = dof_count + self._link_count = link_count + self._dof_names = dof_names or [f"dof_{i}" for i in range(dof_count)] + self._link_names = link_names or [f"link_{i}" for i in range(link_count)] + self._fixed_base = fixed_base + + @property + def dof_count(self) -> int: + """Number of degrees of freedom.""" + return self._dof_count + + @property + def link_count(self) -> int: + """Number of links.""" + return self._link_count + + @property + def dof_names(self) -> list[str]: + """Names of the degrees of freedom.""" + return self._dof_names + + @property + def link_names(self) -> list[str]: + """Names of the links.""" + return self._link_names + + @property + def fixed_base(self) -> bool: + """Whether the articulation has a fixed base.""" + return self._fixed_base diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py new file mode 100644 index 00000000000..41824b76e12 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py @@ -0,0 +1,284 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Utilities for patching PhysX views with mock implementations.""" + +from __future__ import annotations + +import functools +from contextlib import contextmanager +from typing import Any, Callable, Generator, TypeVar +from unittest.mock import patch + +F = TypeVar("F", bound=Callable[..., Any]) + + +@contextmanager +def patch_rigid_body_view( + target: str, + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.RigidBodyView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.RigidBodyView"). + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. + + Example: + >>> with patch_rigid_body_view("my_module.view", count=4) as mock: + ... view = my_function() + ... view.set_mock_transforms(torch.randn(4, 7)) + """ + from ..views import MockRigidBodyView + + def create_mock(*args, **kwargs): + return MockRigidBodyView( + count=kwargs.get("count", count), + prim_paths=kwargs.get("prim_paths", prim_paths), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_articulation_view( + target: str, + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.ArticulationView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.ArticulationView"). + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. 
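+
+    Note: keyword arguments passed when the patched class is instantiated take
+    precedence over the defaults given here, because the replacement factory
+    reads each value through kwargs.get().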
+ + Example: + >>> with patch_articulation_view("my_module.view", num_dofs=12) as mock: + ... view = my_function() + ... positions = view.get_dof_positions() + """ + from ..views import MockArticulationView + + def create_mock(*args, **kwargs): + return MockArticulationView( + count=kwargs.get("count", count), + num_dofs=kwargs.get("num_dofs", num_dofs), + num_links=kwargs.get("num_links", num_links), + dof_names=kwargs.get("dof_names", dof_names), + link_names=kwargs.get("link_names", link_names), + fixed_base=kwargs.get("fixed_base", fixed_base), + prim_paths=kwargs.get("prim_paths", prim_paths), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +@contextmanager +def patch_rigid_contact_view( + target: str, + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", +) -> Generator[Any, None, None]: + """Context manager for patching physx.RigidContactView with a mock. + + Args: + target: The target to patch (e.g., "my_module.physx.RigidContactView"). + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + + Yields: + The mock class that will be used when the target is instantiated. + + Example: + >>> with patch_rigid_contact_view("my_module.view", num_bodies=4) as mock: + ... view = my_function() + ... forces = view.get_net_contact_forces(0.01) + """ + from ..views import MockRigidContactView + + def create_mock(*args, **kwargs): + return MockRigidContactView( + count=kwargs.get("count", count), + num_bodies=kwargs.get("num_bodies", num_bodies), + filter_count=kwargs.get("filter_count", filter_count), + max_contact_data_count=kwargs.get("max_contact_data_count", max_contact_data_count), + device=kwargs.get("device", device), + ) + + with patch(target, side_effect=create_mock) as mock_class: + yield mock_class + + +# -- Decorators -- + + +def mock_rigid_body_view( + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockRigidBodyView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_rigid_body_view(count=4) + ... def test_my_function(mock_view): + ... transforms = mock_view.get_transforms() + ... assert transforms.shape == (4, 7) + """ + from ..views import MockRigidBodyView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockRigidBodyView(count=count, prim_paths=prim_paths, device=device) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator + + +def mock_articulation_view( + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockArticulationView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of articulation instances. 
+ num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. + link_names: Names of the links. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_articulation_view(count=4, num_dofs=12, num_links=13) + ... def test_my_function(mock_view): + ... positions = mock_view.get_dof_positions() + ... assert positions.shape == (4, 12) + """ + from ..views import MockArticulationView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockArticulationView( + count=count, + num_dofs=num_dofs, + num_links=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + prim_paths=prim_paths, + device=device, + ) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator + + +def mock_rigid_contact_view( + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", +) -> Callable[[F], F]: + """Decorator for injecting MockRigidContactView into test function. + + The mock view is passed as the first argument to the decorated function. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation. + + Returns: + A decorator function. + + Example: + >>> @mock_rigid_contact_view(count=4, num_bodies=4, filter_count=2) + ... def test_my_function(mock_view): + ... forces = mock_view.get_net_contact_forces(0.01) + ... assert forces.shape == (16, 3) + """ + from ..views import MockRigidContactView + + def decorator(func: F) -> F: + @functools.wraps(func) + def wrapper(*args, **kwargs): + mock = MockRigidContactView( + count=count, + num_bodies=num_bodies, + filter_count=filter_count, + max_contact_data_count=max_contact_data_count, + device=device, + ) + return func(mock, *args, **kwargs) + + return wrapper # type: ignore[return-value] + + return decorator diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py new file mode 100644 index 00000000000..f7e7e478ce2 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock PhysX TensorAPI views.""" + +from .mock_articulation_view import MockArticulationView +from .mock_rigid_body_view import MockRigidBodyView +from .mock_rigid_contact_view import MockRigidContactView + +__all__ = ["MockRigidBodyView", "MockArticulationView", "MockRigidContactView"] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py new file mode 100644 index 00000000000..5ba316882d2 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py @@ -0,0 +1,794 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX ArticulationView.""" + +from __future__ import annotations + +import torch + +from ..utils.mock_shared_metatype import MockSharedMetatype + + +class MockArticulationView: + """Mock implementation of physx.ArticulationView for unit testing. + + This class mimics the interface of the PhysX TensorAPI ArticulationView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - root_transforms: (N, 7) - [pos(3), quat_xyzw(4)] + - root_velocities: (N, 6) - [lin_vel(3), ang_vel(3)] + - link_transforms: (N, L, 7) - per-link poses + - link_velocities: (N, L, 6) - per-link velocities + - dof_positions: (N, J) - joint positions + - dof_velocities: (N, J) - joint velocities + - dof_limits: (N, J, 2) - [lower, upper] limits + - dof_stiffnesses: (N, J) - joint stiffnesses + - dof_dampings: (N, J) - joint dampings + - dof_max_forces: (N, J) - maximum joint forces + - dof_max_velocities: (N, J) - maximum joint velocities + - masses: (N, L, 1) - per-link masses + - coms: (N, L, 7) - per-link centers of mass + - inertias: (N, L, 3, 3) - per-link inertia tensors + + Where N = count, L = num_links, J = num_dofs + """ + + def __init__( + self, + count: int = 1, + num_dofs: int = 1, + num_links: int = 2, + dof_names: list[str] | None = None, + link_names: list[str] | None = None, + fixed_base: bool = False, + prim_paths: list[str] | None = None, + device: str = "cpu", + ): + """Initialize the mock articulation view. + + Args: + count: Number of articulation instances. + num_dofs: Number of degrees of freedom (joints). + num_links: Number of links (bodies). + dof_names: Names of the DOFs. Defaults to auto-generated names. + link_names: Names of the links. Defaults to auto-generated names. + fixed_base: Whether the articulation has a fixed base. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation ("cpu" or "cuda"). 
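+
+        Example (a small sketch; the expected values come from the lazy defaults
+        implemented by the getters below):
+            >>> view = MockArticulationView(count=2, num_dofs=3, num_links=4)
+            >>> view.get_dof_positions().shape
+            torch.Size([2, 3])
+            >>> view.get_root_transforms()[0, 6].item()  # identity quaternion, w last
+            1.0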
+ """ + self._count = count + self._num_dofs = num_dofs + self._num_links = num_links + self._device = device + self._prim_paths = prim_paths or [f"/World/Articulation_{i}" for i in range(count)] + + # Create shared metatype + self._shared_metatype = MockSharedMetatype( + dof_count=num_dofs, + link_count=num_links, + dof_names=dof_names, + link_names=link_names, + fixed_base=fixed_base, + ) + + # Tendon properties (fixed values for mock) + self._max_fixed_tendons = 0 + self._max_spatial_tendons = 0 + + # Internal state (lazily initialized) + self._root_transforms: torch.Tensor | None = None + self._root_velocities: torch.Tensor | None = None + self._link_transforms: torch.Tensor | None = None + self._link_velocities: torch.Tensor | None = None + self._dof_positions: torch.Tensor | None = None + self._dof_velocities: torch.Tensor | None = None + self._dof_projected_joint_forces: torch.Tensor | None = None + self._dof_limits: torch.Tensor | None = None + self._dof_stiffnesses: torch.Tensor | None = None + self._dof_dampings: torch.Tensor | None = None + self._dof_max_forces: torch.Tensor | None = None + self._dof_max_velocities: torch.Tensor | None = None + self._dof_armatures: torch.Tensor | None = None + self._dof_friction_coefficients: torch.Tensor | None = None + self._masses: torch.Tensor | None = None + self._coms: torch.Tensor | None = None + self._inertias: torch.Tensor | None = None + + # -- Properties -- + + @property + def count(self) -> int: + """Number of articulation instances.""" + return self._count + + @property + def shared_metatype(self) -> MockSharedMetatype: + """Shared metatype containing articulation structure metadata.""" + return self._shared_metatype + + @property + def max_fixed_tendons(self) -> int: + """Maximum number of fixed tendons.""" + return self._max_fixed_tendons + + @property + def max_spatial_tendons(self) -> int: + """Maximum number of spatial tendons.""" + return self._max_spatial_tendons + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + # -- Root Getters -- + + def get_root_transforms(self) -> torch.Tensor: + """Get world transforms of root links. + + Returns: + Tensor of shape (N, 7) with [pos(3), quat_xyzw(4)]. + """ + if self._root_transforms is None: + self._root_transforms = torch.zeros(self._count, 7, device=self._device) + self._root_transforms[:, 6] = 1.0 # w=1 for identity quaternion + return self._root_transforms.clone() + + def get_root_velocities(self) -> torch.Tensor: + """Get velocities of root links. + + Returns: + Tensor of shape (N, 6) with [lin_vel(3), ang_vel(3)]. + """ + if self._root_velocities is None: + self._root_velocities = torch.zeros(self._count, 6, device=self._device) + return self._root_velocities.clone() + + # -- Link Getters -- + + def get_link_transforms(self) -> torch.Tensor: + """Get world transforms of all links. + + Returns: + Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. + """ + if self._link_transforms is None: + self._link_transforms = torch.zeros( + self._count, self._num_links, 7, device=self._device + ) + self._link_transforms[:, :, 6] = 1.0 # w=1 for identity quaternion + return self._link_transforms.clone() + + def get_link_velocities(self) -> torch.Tensor: + """Get velocities of all links. + + Returns: + Tensor of shape (N, L, 6) with [lin_vel(3), ang_vel(3)] per link. 
+ """ + if self._link_velocities is None: + self._link_velocities = torch.zeros( + self._count, self._num_links, 6, device=self._device + ) + return self._link_velocities.clone() + + # -- DOF Getters -- + + def get_dof_positions(self) -> torch.Tensor: + """Get positions of all DOFs. + + Returns: + Tensor of shape (N, J) with joint positions. + """ + if self._dof_positions is None: + self._dof_positions = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_positions.clone() + + def get_dof_velocities(self) -> torch.Tensor: + """Get velocities of all DOFs. + + Returns: + Tensor of shape (N, J) with joint velocities. + """ + if self._dof_velocities is None: + self._dof_velocities = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_velocities.clone() + + def get_dof_projected_joint_forces(self) -> torch.Tensor: + """Get projected joint forces of all DOFs. + + Returns: + Tensor of shape (N, J) with projected joint forces. + """ + if self._dof_projected_joint_forces is None: + self._dof_projected_joint_forces = torch.zeros( + self._count, self._num_dofs, device=self._device + ) + return self._dof_projected_joint_forces.clone() + + def get_dof_limits(self) -> torch.Tensor: + """Get position limits of all DOFs. + + Returns: + Tensor of shape (N, J, 2) with [lower, upper] limits. + """ + if self._dof_limits is None: + # Default: no limits (infinite) + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device=self._device) + self._dof_limits[:, :, 0] = float("-inf") # lower limit + self._dof_limits[:, :, 1] = float("inf") # upper limit + return self._dof_limits.clone() + + def get_dof_stiffnesses(self) -> torch.Tensor: + """Get stiffnesses of all DOFs. + + Returns: + Tensor of shape (N, J) with joint stiffnesses. + """ + if self._dof_stiffnesses is None: + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_stiffnesses.clone() + + def get_dof_dampings(self) -> torch.Tensor: + """Get dampings of all DOFs. + + Returns: + Tensor of shape (N, J) with joint dampings. + """ + if self._dof_dampings is None: + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_dampings.clone() + + def get_dof_max_forces(self) -> torch.Tensor: + """Get maximum forces of all DOFs. + + Returns: + Tensor of shape (N, J) with maximum joint forces. + """ + if self._dof_max_forces is None: + # Default: infinite max force + self._dof_max_forces = torch.full( + (self._count, self._num_dofs), float("inf"), device=self._device + ) + return self._dof_max_forces.clone() + + def get_dof_max_velocities(self) -> torch.Tensor: + """Get maximum velocities of all DOFs. + + Returns: + Tensor of shape (N, J) with maximum joint velocities. + """ + if self._dof_max_velocities is None: + # Default: infinite max velocity + self._dof_max_velocities = torch.full( + (self._count, self._num_dofs), float("inf"), device=self._device + ) + return self._dof_max_velocities.clone() + + def get_dof_armatures(self) -> torch.Tensor: + """Get armatures of all DOFs. + + Returns: + Tensor of shape (N, J) with joint armatures. + """ + if self._dof_armatures is None: + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device=self._device) + return self._dof_armatures.clone() + + def get_dof_friction_coefficients(self) -> torch.Tensor: + """Get friction coefficients of all DOFs. + + Returns: + Tensor of shape (N, J) with joint friction coefficients. 
+ """ + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = torch.zeros( + self._count, self._num_dofs, device=self._device + ) + return self._dof_friction_coefficients.clone() + + # -- Mass Property Getters -- + + def get_masses(self) -> torch.Tensor: + """Get masses of all links. + + Returns: + Tensor of shape (N, L, 1) with link masses. + """ + if self._masses is None: + self._masses = torch.ones(self._count, self._num_links, 1, device=self._device) + return self._masses.clone() + + def get_coms(self) -> torch.Tensor: + """Get centers of mass of all links. + + Returns: + Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. + """ + if self._coms is None: + self._coms = torch.zeros(self._count, self._num_links, 7, device=self._device) + self._coms[:, :, 6] = 1.0 # w=1 for identity quaternion + return self._coms.clone() + + def get_inertias(self) -> torch.Tensor: + """Get inertia tensors of all links. + + Returns: + Tensor of shape (N, L, 3, 3) with 3x3 inertia matrices per link. + """ + if self._inertias is None: + # Default: identity inertia + self._inertias = torch.zeros( + self._count, self._num_links, 3, 3, device=self._device + ) + self._inertias[:, :, 0, 0] = 1.0 + self._inertias[:, :, 1, 1] = 1.0 + self._inertias[:, :, 2, 2] = 1.0 + return self._inertias.clone() + + # -- Root Setters -- + + def set_root_transforms( + self, + transforms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set world transforms of root links. + + Args: + transforms: Tensor of shape (N, 7) or (len(indices), 7). + indices: Optional indices of articulations to update. + """ + transforms = transforms.to(self._device) + if self._root_transforms is None: + self._root_transforms = torch.zeros(self._count, 7, device=self._device) + self._root_transforms[:, 6] = 1.0 + if indices is not None: + self._root_transforms[indices] = transforms + else: + self._root_transforms = transforms + + def set_root_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of root links. + + Args: + velocities: Tensor of shape (N, 6) or (len(indices), 6). + indices: Optional indices of articulations to update. + """ + velocities = velocities.to(self._device) + if self._root_velocities is None: + self._root_velocities = torch.zeros(self._count, 6, device=self._device) + if indices is not None: + self._root_velocities[indices] = velocities + else: + self._root_velocities = velocities + + # -- DOF Setters -- + + def set_dof_positions( + self, + positions: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set positions of all DOFs. + + Args: + positions: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + positions = positions.to(self._device) + if self._dof_positions is None: + self._dof_positions = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_positions[indices] = positions + else: + self._dof_positions = positions + + def set_dof_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of all DOFs. + + Args: + velocities: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. 
+ """ + velocities = velocities.to(self._device) + if self._dof_velocities is None: + self._dof_velocities = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_velocities[indices] = velocities + else: + self._dof_velocities = velocities + + def set_dof_position_targets( + self, + targets: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set position targets for all DOFs (no-op in mock). + + Args: + targets: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_velocity_targets( + self, + targets: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocity targets for all DOFs (no-op in mock). + + Args: + targets: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_actuation_forces( + self, + forces: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set actuation forces for all DOFs (no-op in mock). + + Args: + forces: Tensor of shape (N, J) or (len(indices), J). + indices: Optional indices of articulations to update. + """ + pass # No-op for mock + + def set_dof_limits( + self, + limits: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set position limits of all DOFs. + + Args: + limits: Tensor of shape (N, J, 2) with [lower, upper] limits. + indices: Optional indices of articulations to update. + """ + limits = limits.to(self._device) + if self._dof_limits is None: + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device=self._device) + self._dof_limits[:, :, 0] = float("-inf") + self._dof_limits[:, :, 1] = float("inf") + if indices is not None: + self._dof_limits[indices] = limits + else: + self._dof_limits = limits + + def set_dof_stiffnesses( + self, + stiffnesses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set stiffnesses of all DOFs. + + Args: + stiffnesses: Tensor of shape (N, J). + indices: Optional indices of articulations to update. + """ + stiffnesses = stiffnesses.to(self._device) + if self._dof_stiffnesses is None: + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_stiffnesses[indices] = stiffnesses + else: + self._dof_stiffnesses = stiffnesses + + def set_dof_dampings( + self, + dampings: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set dampings of all DOFs. + + Args: + dampings: Tensor of shape (N, J). + indices: Optional indices of articulations to update. + """ + dampings = dampings.to(self._device) + if self._dof_dampings is None: + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_dampings[indices] = dampings + else: + self._dof_dampings = dampings + + def set_dof_max_forces( + self, + max_forces: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set maximum forces of all DOFs. + + Args: + max_forces: Tensor of shape (N, J). + indices: Optional indices of articulations to update. 
+ """ + max_forces = max_forces.to(self._device) + if self._dof_max_forces is None: + self._dof_max_forces = torch.full( + (self._count, self._num_dofs), float("inf"), device=self._device + ) + if indices is not None: + self._dof_max_forces[indices] = max_forces + else: + self._dof_max_forces = max_forces + + def set_dof_max_velocities( + self, + max_velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set maximum velocities of all DOFs. + + Args: + max_velocities: Tensor of shape (N, J). + indices: Optional indices of articulations to update. + """ + max_velocities = max_velocities.to(self._device) + if self._dof_max_velocities is None: + self._dof_max_velocities = torch.full( + (self._count, self._num_dofs), float("inf"), device=self._device + ) + if indices is not None: + self._dof_max_velocities[indices] = max_velocities + else: + self._dof_max_velocities = max_velocities + + def set_dof_armatures( + self, + armatures: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set armatures of all DOFs. + + Args: + armatures: Tensor of shape (N, J). + indices: Optional indices of articulations to update. + """ + armatures = armatures.to(self._device) + if self._dof_armatures is None: + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device=self._device) + if indices is not None: + self._dof_armatures[indices] = armatures + else: + self._dof_armatures = armatures + + def set_dof_friction_coefficients( + self, + friction_coefficients: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set friction coefficients of all DOFs. + + Args: + friction_coefficients: Tensor of shape (N, J). + indices: Optional indices of articulations to update. + """ + friction_coefficients = friction_coefficients.to(self._device) + if self._dof_friction_coefficients is None: + self._dof_friction_coefficients = torch.zeros( + self._count, self._num_dofs, device=self._device + ) + if indices is not None: + self._dof_friction_coefficients[indices] = friction_coefficients + else: + self._dof_friction_coefficients = friction_coefficients + + # -- Mass Property Setters -- + + def set_masses( + self, + masses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set masses of all links. + + Args: + masses: Tensor of shape (N, L, 1). + indices: Optional indices of articulations to update. + """ + masses = masses.to(self._device) + if self._masses is None: + self._masses = torch.ones(self._count, self._num_links, 1, device=self._device) + if indices is not None: + self._masses[indices] = masses + else: + self._masses = masses + + def set_coms( + self, + coms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set centers of mass of all links. + + Args: + coms: Tensor of shape (N, L, 7). + indices: Optional indices of articulations to update. + """ + coms = coms.to(self._device) + if self._coms is None: + self._coms = torch.zeros(self._count, self._num_links, 7, device=self._device) + self._coms[:, :, 6] = 1.0 + if indices is not None: + self._coms[indices] = coms + else: + self._coms = coms + + def set_inertias( + self, + inertias: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set inertia tensors of all links. + + Args: + inertias: Tensor of shape (N, L, 3, 3). + indices: Optional indices of articulations to update. 
+ """ + inertias = inertias.to(self._device) + if self._inertias is None: + self._inertias = torch.zeros( + self._count, self._num_links, 3, 3, device=self._device + ) + self._inertias[:, :, 0, 0] = 1.0 + self._inertias[:, :, 1, 1] = 1.0 + self._inertias[:, :, 2, 2] = 1.0 + if indices is not None: + self._inertias[indices] = inertias + else: + self._inertias = inertias + + # -- Mock setters (direct test data injection) -- + + def set_mock_root_transforms(self, transforms: torch.Tensor) -> None: + """Set mock root transform data directly for testing. + + Args: + transforms: Tensor of shape (N, 7). + """ + self._root_transforms = transforms.to(self._device) + + def set_mock_root_velocities(self, velocities: torch.Tensor) -> None: + """Set mock root velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, 6). + """ + self._root_velocities = velocities.to(self._device) + + def set_mock_link_transforms(self, transforms: torch.Tensor) -> None: + """Set mock link transform data directly for testing. + + Args: + transforms: Tensor of shape (N, L, 7). + """ + self._link_transforms = transforms.to(self._device) + + def set_mock_link_velocities(self, velocities: torch.Tensor) -> None: + """Set mock link velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, L, 6). + """ + self._link_velocities = velocities.to(self._device) + + def set_mock_dof_positions(self, positions: torch.Tensor) -> None: + """Set mock DOF position data directly for testing. + + Args: + positions: Tensor of shape (N, J). + """ + self._dof_positions = positions.to(self._device) + + def set_mock_dof_velocities(self, velocities: torch.Tensor) -> None: + """Set mock DOF velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, J). + """ + self._dof_velocities = velocities.to(self._device) + + def set_mock_dof_projected_joint_forces(self, forces: torch.Tensor) -> None: + """Set mock projected joint force data directly for testing. + + Args: + forces: Tensor of shape (N, J). + """ + self._dof_projected_joint_forces = forces.to(self._device) + + def set_mock_dof_limits(self, limits: torch.Tensor) -> None: + """Set mock DOF limit data directly for testing. + + Args: + limits: Tensor of shape (N, J, 2). + """ + self._dof_limits = limits.to(self._device) + + def set_mock_dof_stiffnesses(self, stiffnesses: torch.Tensor) -> None: + """Set mock DOF stiffness data directly for testing. + + Args: + stiffnesses: Tensor of shape (N, J). + """ + self._dof_stiffnesses = stiffnesses.to(self._device) + + def set_mock_dof_dampings(self, dampings: torch.Tensor) -> None: + """Set mock DOF damping data directly for testing. + + Args: + dampings: Tensor of shape (N, J). + """ + self._dof_dampings = dampings.to(self._device) + + def set_mock_dof_max_forces(self, max_forces: torch.Tensor) -> None: + """Set mock DOF max force data directly for testing. + + Args: + max_forces: Tensor of shape (N, J). + """ + self._dof_max_forces = max_forces.to(self._device) + + def set_mock_dof_max_velocities(self, max_velocities: torch.Tensor) -> None: + """Set mock DOF max velocity data directly for testing. + + Args: + max_velocities: Tensor of shape (N, J). + """ + self._dof_max_velocities = max_velocities.to(self._device) + + def set_mock_dof_armatures(self, armatures: torch.Tensor) -> None: + """Set mock DOF armature data directly for testing. + + Args: + armatures: Tensor of shape (N, J). 
+ """ + self._dof_armatures = armatures.to(self._device) + + def set_mock_dof_friction_coefficients(self, friction_coefficients: torch.Tensor) -> None: + """Set mock DOF friction coefficient data directly for testing. + + Args: + friction_coefficients: Tensor of shape (N, J). + """ + self._dof_friction_coefficients = friction_coefficients.to(self._device) + + def set_mock_masses(self, masses: torch.Tensor) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Tensor of shape (N, L, 1). + """ + self._masses = masses.to(self._device) + + def set_mock_coms(self, coms: torch.Tensor) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Tensor of shape (N, L, 7). + """ + self._coms = coms.to(self._device) + + def set_mock_inertias(self, inertias: torch.Tensor) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Tensor of shape (N, L, 3, 3). + """ + self._inertias = inertias.to(self._device) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py new file mode 100644 index 00000000000..ceb332ad936 --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py @@ -0,0 +1,307 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidBodyView.""" + +from __future__ import annotations + +import torch + + +class MockRigidBodyView: + """Mock implementation of physx.RigidBodyView for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidBodyView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - transforms: (N, 7) - [pos(3), quat_xyzw(4)] + - velocities: (N, 6) - [lin_vel(3), ang_vel(3)] + - accelerations: (N, 6) - [lin_acc(3), ang_acc(3)] + - masses: (N, 1, 1) + - coms: (N, 1, 7) - center of mass [pos(3), quat_xyzw(4)] + - inertias: (N, 1, 3, 3) - 3x3 inertia matrix + """ + + def __init__( + self, + count: int = 1, + prim_paths: list[str] | None = None, + device: str = "cpu", + ): + """Initialize the mock rigid body view. + + Args: + count: Number of rigid body instances. + prim_paths: USD prim paths for each instance. + device: Device for tensor allocation ("cpu" or "cuda"). + """ + self._count = count + self._prim_paths = prim_paths or [f"/World/RigidBody_{i}" for i in range(count)] + self._device = device + self._backend = "torch" + + # Internal state (lazily initialized) + self._transforms: torch.Tensor | None = None + self._velocities: torch.Tensor | None = None + self._accelerations: torch.Tensor | None = None + self._masses: torch.Tensor | None = None + self._coms: torch.Tensor | None = None + self._inertias: torch.Tensor | None = None + + # -- Properties -- + + @property + def count(self) -> int: + """Number of rigid body instances.""" + return self._count + + @property + def prim_paths(self) -> list[str]: + """USD prim paths for each instance.""" + return self._prim_paths + + # -- Getters -- + + def get_transforms(self) -> torch.Tensor: + """Get world transforms of all rigid bodies. + + Returns: + Tensor of shape (N, 7) with [pos_x, pos_y, pos_z, quat_x, quat_y, quat_z, quat_w]. 
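+
+        Note: the quaternion follows the PhysX TensorAPI (x, y, z, w) layout, so the
+        identity default stores 1.0 in the last column; callers expecting the
+        (w, x, y, z) ordering used by the mock sensor data classes in this patch
+        must reorder the components.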
+ """ + if self._transforms is None: + # Default: origin position with identity quaternion (xyzw format) + self._transforms = torch.zeros(self._count, 7, device=self._device) + self._transforms[:, 6] = 1.0 # w=1 for identity quaternion + return self._transforms.clone() + + def get_velocities(self) -> torch.Tensor: + """Get velocities of all rigid bodies. + + Returns: + Tensor of shape (N, 6) with [lin_vel(3), ang_vel(3)]. + """ + if self._velocities is None: + self._velocities = torch.zeros(self._count, 6, device=self._device) + return self._velocities.clone() + + def get_accelerations(self) -> torch.Tensor: + """Get accelerations of all rigid bodies. + + Returns: + Tensor of shape (N, 6) with [lin_acc(3), ang_acc(3)]. + """ + if self._accelerations is None: + self._accelerations = torch.zeros(self._count, 6, device=self._device) + return self._accelerations.clone() + + def get_masses(self) -> torch.Tensor: + """Get masses of all rigid bodies. + + Returns: + Tensor of shape (N, 1, 1) with mass values. + """ + if self._masses is None: + self._masses = torch.ones(self._count, 1, 1, device=self._device) + return self._masses.clone() + + def get_coms(self) -> torch.Tensor: + """Get centers of mass of all rigid bodies. + + Returns: + Tensor of shape (N, 1, 7) with [pos(3), quat_xyzw(4)]. + """ + if self._coms is None: + # Default: local origin with identity quaternion + self._coms = torch.zeros(self._count, 1, 7, device=self._device) + self._coms[:, :, 6] = 1.0 # w=1 for identity quaternion + return self._coms.clone() + + def get_inertias(self) -> torch.Tensor: + """Get inertia tensors of all rigid bodies. + + Returns: + Tensor of shape (N, 1, 3, 3) with 3x3 inertia matrices. + """ + if self._inertias is None: + # Default: identity inertia (unit sphere) + self._inertias = torch.zeros(self._count, 1, 3, 3, device=self._device) + self._inertias[:, :, 0, 0] = 1.0 + self._inertias[:, :, 1, 1] = 1.0 + self._inertias[:, :, 2, 2] = 1.0 + return self._inertias.clone() + + # -- Setters (simulation interface) -- + + def set_transforms( + self, + transforms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set world transforms of rigid bodies. + + Args: + transforms: Tensor of shape (N, 7) or (len(indices), 7). + indices: Optional indices of bodies to update. + """ + transforms = transforms.to(self._device) + if self._transforms is None: + self._transforms = torch.zeros(self._count, 7, device=self._device) + self._transforms[:, 6] = 1.0 + if indices is not None: + self._transforms[indices] = transforms + else: + self._transforms = transforms + + def set_velocities( + self, + velocities: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set velocities of rigid bodies. + + Args: + velocities: Tensor of shape (N, 6) or (len(indices), 6). + indices: Optional indices of bodies to update. + """ + velocities = velocities.to(self._device) + if self._velocities is None: + self._velocities = torch.zeros(self._count, 6, device=self._device) + if indices is not None: + self._velocities[indices] = velocities + else: + self._velocities = velocities + + def set_masses( + self, + masses: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set masses of rigid bodies. + + Args: + masses: Tensor of shape (N, 1, 1) or (len(indices), 1, 1). + indices: Optional indices of bodies to update. 
+ """ + masses = masses.to(self._device) + if self._masses is None: + self._masses = torch.ones(self._count, 1, 1, device=self._device) + if indices is not None: + self._masses[indices] = masses + else: + self._masses = masses + + def set_coms( + self, + coms: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set centers of mass of rigid bodies. + + Args: + coms: Tensor of shape (N, 1, 7) or (len(indices), 1, 7). + indices: Optional indices of bodies to update. + """ + coms = coms.to(self._device) + if self._coms is None: + self._coms = torch.zeros(self._count, 1, 7, device=self._device) + self._coms[:, :, 6] = 1.0 + if indices is not None: + self._coms[indices] = coms + else: + self._coms = coms + + def set_inertias( + self, + inertias: torch.Tensor, + indices: torch.Tensor | None = None, + ) -> None: + """Set inertia tensors of rigid bodies. + + Args: + inertias: Tensor of shape (N, 1, 3, 3) or (len(indices), 1, 3, 3). + indices: Optional indices of bodies to update. + """ + inertias = inertias.to(self._device) + if self._inertias is None: + self._inertias = torch.zeros(self._count, 1, 3, 3, device=self._device) + self._inertias[:, :, 0, 0] = 1.0 + self._inertias[:, :, 1, 1] = 1.0 + self._inertias[:, :, 2, 2] = 1.0 + if indices is not None: + self._inertias[indices] = inertias + else: + self._inertias = inertias + + # -- Mock setters (direct test data injection) -- + + def set_mock_transforms(self, transforms: torch.Tensor) -> None: + """Set mock transform data directly for testing. + + Args: + transforms: Tensor of shape (N, 7). + """ + self._transforms = transforms.to(self._device) + + def set_mock_velocities(self, velocities: torch.Tensor) -> None: + """Set mock velocity data directly for testing. + + Args: + velocities: Tensor of shape (N, 6). + """ + self._velocities = velocities.to(self._device) + + def set_mock_accelerations(self, accelerations: torch.Tensor) -> None: + """Set mock acceleration data directly for testing. + + Args: + accelerations: Tensor of shape (N, 6). + """ + self._accelerations = accelerations.to(self._device) + + def set_mock_masses(self, masses: torch.Tensor) -> None: + """Set mock mass data directly for testing. + + Args: + masses: Tensor of shape (N, 1, 1). + """ + self._masses = masses.to(self._device) + + def set_mock_coms(self, coms: torch.Tensor) -> None: + """Set mock center of mass data directly for testing. + + Args: + coms: Tensor of shape (N, 1, 7). + """ + self._coms = coms.to(self._device) + + def set_mock_inertias(self, inertias: torch.Tensor) -> None: + """Set mock inertia data directly for testing. + + Args: + inertias: Tensor of shape (N, 1, 3, 3). + """ + self._inertias = inertias.to(self._device) + + # -- Actions (no-op for testing) -- + + def apply_forces_and_torques_at_position( + self, + forces: torch.Tensor | None = None, + torques: torch.Tensor | None = None, + positions: torch.Tensor | None = None, + indices: torch.Tensor | None = None, + is_global: bool = True, + ) -> None: + """Apply forces and torques at positions (no-op in mock). + + Args: + forces: Forces to apply, shape (N, 3) or (len(indices), 3). + torques: Torques to apply, shape (N, 3) or (len(indices), 3). + positions: Positions to apply forces at, shape (N, 3) or (len(indices), 3). + indices: Optional indices of bodies to apply to. + is_global: Whether forces/torques are in global frame. 
+ """ + pass # No-op for mock diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py new file mode 100644 index 00000000000..3aa00c5c9fd --- /dev/null +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py @@ -0,0 +1,284 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock implementation of PhysX RigidContactView.""" + +from __future__ import annotations + +import torch + + +class MockRigidContactView: + """Mock implementation of physx.RigidContactView for unit testing. + + This class mimics the interface of the PhysX TensorAPI RigidContactView, + allowing tests to run without Isaac Sim or GPU simulation. + + Data Shapes: + - net_contact_forces: (N*B, 3) - flattened net forces + - contact_force_matrix: (N*B, F, 3) - per-filter forces + - contact_data: tuple of 6 tensors (positions, normals, impulses, separations, num_found, patch_id) + - friction_data: tuple of 4 tensors (friction_forces, friction_impulses, friction_points, patch_id) + + Where: + - N = count (number of instances) + - B = num_bodies (bodies per instance) + - F = filter_count (number of filter bodies) + """ + + def __init__( + self, + count: int = 1, + num_bodies: int = 1, + filter_count: int = 0, + max_contact_data_count: int = 16, + device: str = "cpu", + ): + """Initialize the mock rigid contact view. + + Args: + count: Number of instances. + num_bodies: Number of bodies per instance. + filter_count: Number of filter bodies for contact filtering. + max_contact_data_count: Maximum number of contact data points. + device: Device for tensor allocation ("cpu" or "cuda"). + """ + self._count = count + self._num_bodies = num_bodies + self._filter_count = filter_count + self._max_contact_data_count = max_contact_data_count + self._device = device + + # Total number of bodies (flattened) + self._total_bodies = count * num_bodies + + # Internal state (lazily initialized) + self._net_contact_forces: torch.Tensor | None = None + self._contact_force_matrix: torch.Tensor | None = None + self._contact_positions: torch.Tensor | None = None + self._contact_normals: torch.Tensor | None = None + self._contact_impulses: torch.Tensor | None = None + self._contact_separations: torch.Tensor | None = None + self._contact_num_found: torch.Tensor | None = None + self._contact_patch_id: torch.Tensor | None = None + self._friction_forces: torch.Tensor | None = None + self._friction_impulses: torch.Tensor | None = None + self._friction_points: torch.Tensor | None = None + self._friction_patch_id: torch.Tensor | None = None + + # -- Properties -- + + @property + def filter_count(self) -> int: + """Number of filter bodies for contact filtering.""" + return self._filter_count + + @property + def count(self) -> int: + """Number of instances.""" + return self._count + + @property + def num_bodies(self) -> int: + """Number of bodies per instance.""" + return self._num_bodies + + # -- Getters -- + + def get_net_contact_forces(self, dt: float) -> torch.Tensor: + """Get net contact forces on all bodies. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tensor of shape (N*B, 3) with net forces per body. 
+ """ + if self._net_contact_forces is None: + self._net_contact_forces = torch.zeros(self._total_bodies, 3, device=self._device) + return self._net_contact_forces.clone() + + def get_contact_force_matrix(self, dt: float) -> torch.Tensor: + """Get contact force matrix (per-filter forces). + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tensor of shape (N*B, F, 3) with forces per body per filter. + """ + if self._contact_force_matrix is None: + self._contact_force_matrix = torch.zeros( + self._total_bodies, self._filter_count, 3, device=self._device + ) + return self._contact_force_matrix.clone() + + def get_contact_data( + self, dt: float + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Get detailed contact data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). + + Returns: + Tuple of 6 tensors: + - contact_positions: (N*B, max_contacts, 3) - contact point positions + - contact_normals: (N*B, max_contacts, 3) - contact normals + - contact_impulses: (N*B, max_contacts, 3) - contact impulses + - contact_separations: (N*B, max_contacts) - contact separations + - contact_num_found: (N*B,) - number of contacts found + - contact_patch_id: (N*B, max_contacts) - contact patch IDs + """ + max_contacts = self._max_contact_data_count + + if self._contact_positions is None: + self._contact_positions = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._contact_normals is None: + self._contact_normals = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._contact_impulses is None: + self._contact_impulses = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._contact_separations is None: + self._contact_separations = torch.zeros( + self._total_bodies, max_contacts, device=self._device + ) + if self._contact_num_found is None: + self._contact_num_found = torch.zeros( + self._total_bodies, dtype=torch.int32, device=self._device + ) + if self._contact_patch_id is None: + self._contact_patch_id = torch.zeros( + self._total_bodies, max_contacts, dtype=torch.int32, device=self._device + ) + + return ( + self._contact_positions.clone(), + self._contact_normals.clone(), + self._contact_impulses.clone(), + self._contact_separations.clone(), + self._contact_num_found.clone(), + self._contact_patch_id.clone(), + ) + + def get_friction_data( + self, dt: float + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Get friction data. + + Args: + dt: Physics timestep (unused in mock, but required for API compatibility). 
+ + Returns: + Tuple of 4 tensors: + - friction_forces: (N*B, max_contacts, 3) - friction forces + - friction_impulses: (N*B, max_contacts, 3) - friction impulses + - friction_points: (N*B, max_contacts, 3) - friction application points + - friction_patch_id: (N*B, max_contacts) - friction patch IDs + """ + max_contacts = self._max_contact_data_count + + if self._friction_forces is None: + self._friction_forces = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._friction_impulses is None: + self._friction_impulses = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._friction_points is None: + self._friction_points = torch.zeros( + self._total_bodies, max_contacts, 3, device=self._device + ) + if self._friction_patch_id is None: + self._friction_patch_id = torch.zeros( + self._total_bodies, max_contacts, dtype=torch.int32, device=self._device + ) + + return ( + self._friction_forces.clone(), + self._friction_impulses.clone(), + self._friction_points.clone(), + self._friction_patch_id.clone(), + ) + + # -- Mock setters (direct test data injection) -- + + def set_mock_net_contact_forces(self, forces: torch.Tensor) -> None: + """Set mock net contact force data directly for testing. + + Args: + forces: Tensor of shape (N*B, 3). + """ + self._net_contact_forces = forces.to(self._device) + + def set_mock_contact_force_matrix(self, matrix: torch.Tensor) -> None: + """Set mock contact force matrix data directly for testing. + + Args: + matrix: Tensor of shape (N*B, F, 3). + """ + self._contact_force_matrix = matrix.to(self._device) + + def set_mock_contact_data( + self, + positions: torch.Tensor | None = None, + normals: torch.Tensor | None = None, + impulses: torch.Tensor | None = None, + separations: torch.Tensor | None = None, + num_found: torch.Tensor | None = None, + patch_id: torch.Tensor | None = None, + ) -> None: + """Set mock contact data directly for testing. + + Args: + positions: Contact positions, shape (N*B, max_contacts, 3). + normals: Contact normals, shape (N*B, max_contacts, 3). + impulses: Contact impulses, shape (N*B, max_contacts, 3). + separations: Contact separations, shape (N*B, max_contacts). + num_found: Number of contacts found, shape (N*B,). + patch_id: Contact patch IDs, shape (N*B, max_contacts). + """ + if positions is not None: + self._contact_positions = positions.to(self._device) + if normals is not None: + self._contact_normals = normals.to(self._device) + if impulses is not None: + self._contact_impulses = impulses.to(self._device) + if separations is not None: + self._contact_separations = separations.to(self._device) + if num_found is not None: + self._contact_num_found = num_found.to(self._device) + if patch_id is not None: + self._contact_patch_id = patch_id.to(self._device) + + def set_mock_friction_data( + self, + forces: torch.Tensor | None = None, + impulses: torch.Tensor | None = None, + points: torch.Tensor | None = None, + patch_id: torch.Tensor | None = None, + ) -> None: + """Set mock friction data directly for testing. + + Args: + forces: Friction forces, shape (N*B, max_contacts, 3). + impulses: Friction impulses, shape (N*B, max_contacts, 3). + points: Friction application points, shape (N*B, max_contacts, 3). + patch_id: Friction patch IDs, shape (N*B, max_contacts). 
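+
+        Example:
+            A minimal illustrative sketch injecting friction forces only (other
+            fields keep their zero defaults):
+
+            >>> view = MockRigidContactView(count=1, num_bodies=2)
+            >>> view.set_mock_friction_data(forces=torch.ones(2, 16, 3))
+            >>> view.get_friction_data(0.01)[0].sum().item()
+            96.0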
+ """ + if forces is not None: + self._friction_forces = forces.to(self._device) + if impulses is not None: + self._friction_impulses = impulses.to(self._device) + if points is not None: + self._friction_points = points.to(self._device) + if patch_id is not None: + self._friction_patch_id = patch_id.to(self._device) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py new file mode 100644 index 00000000000..cc0c3e20b15 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for mock PhysX interfaces.""" diff --git a/source/isaaclab_physx/test/test_mock_interfaces/conftest.py b/source/isaaclab_physx/test/test_mock_interfaces/conftest.py new file mode 100644 index 00000000000..056274d4f36 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/conftest.py @@ -0,0 +1,62 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Pytest configuration for mock interfaces tests. + +This conftest sets up imports to allow testing the mock interfaces without +requiring Isaac Sim or the full isaaclab_physx package. + +The mock interfaces are designed to be importable without Isaac Sim, so we +pre-load them into sys.modules to avoid triggering the full isaaclab_physx +package initialization. +""" + +import sys +from pathlib import Path + +# Get the path to the mock_interfaces module +_TEST_DIR = Path(__file__).parent +_MOCK_INTERFACES_DIR = _TEST_DIR.parent.parent / "isaaclab_physx" / "test" / "mock_interfaces" + +# Add paths for direct imports +if str(_MOCK_INTERFACES_DIR) not in sys.path: + sys.path.insert(0, str(_MOCK_INTERFACES_DIR)) +if str(_MOCK_INTERFACES_DIR.parent) not in sys.path: + sys.path.insert(0, str(_MOCK_INTERFACES_DIR.parent)) + +# Pre-load mock_interfaces modules to prevent isaaclab_physx from being imported +# This allows tests to use `from isaaclab_physx.test.mock_interfaces import ...` +# without triggering the full package (which requires Isaac Sim) +import mock_interfaces +import mock_interfaces.views +import mock_interfaces.utils +import mock_interfaces.factories +import mock_interfaces.views.mock_rigid_body_view +import mock_interfaces.views.mock_articulation_view +import mock_interfaces.views.mock_rigid_contact_view +import mock_interfaces.utils.mock_shared_metatype +import mock_interfaces.utils.patching + +# Create fake module entries in sys.modules for the isaaclab_physx path +# This makes `from isaaclab_physx.test.mock_interfaces import ...` work +sys.modules["isaaclab_physx"] = type(sys)("isaaclab_physx") +sys.modules["isaaclab_physx.test"] = type(sys)("isaaclab_physx.test") +sys.modules["isaaclab_physx.test.mock_interfaces"] = mock_interfaces +sys.modules["isaaclab_physx.test.mock_interfaces.views"] = mock_interfaces.views +sys.modules["isaaclab_physx.test.mock_interfaces.utils"] = mock_interfaces.utils +sys.modules["isaaclab_physx.test.mock_interfaces.factories"] = mock_interfaces.factories +sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view"] = ( + mock_interfaces.views.mock_rigid_body_view +) +sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_articulation_view"] = ( + mock_interfaces.views.mock_articulation_view +) 
+sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_rigid_contact_view"] = ( + mock_interfaces.views.mock_rigid_contact_view +) +sys.modules["isaaclab_physx.test.mock_interfaces.utils.mock_shared_metatype"] = ( + mock_interfaces.utils.mock_shared_metatype +) +sys.modules["isaaclab_physx.test.mock_interfaces.utils.patching"] = mock_interfaces.utils.patching diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py new file mode 100644 index 00000000000..cb65e28bcac --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -0,0 +1,335 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockArticulationView.""" + +import pytest +import torch + +# Note: we import the mock interfaces directly from within their own package to avoid routing through the +# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. +from mock_interfaces.views import MockArticulationView +from mock_interfaces.factories import ( + create_mock_articulation_view, + create_mock_humanoid_view, + create_mock_quadruped_view, +) + + +class TestMockArticulationViewInit: + """Tests for MockArticulationView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockArticulationView() + assert view.count == 1 + assert view.shared_metatype.dof_count == 1 + assert view.shared_metatype.link_count == 2 + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockArticulationView( + count=4, + num_dofs=12, + num_links=13, + fixed_base=True, + ) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is True + + def test_custom_names(self): + """Test initialization with custom DOF and link names.""" + dof_names = ["joint_0", "joint_1"] + link_names = ["base", "link_1", "link_2"] + view = MockArticulationView( + num_dofs=2, + num_links=3, + dof_names=dof_names, + link_names=link_names, + ) + assert view.shared_metatype.dof_names == dof_names + assert view.shared_metatype.link_names == link_names + + def test_tendon_properties(self): + """Test tendon properties are zero.""" + view = MockArticulationView() + assert view.max_fixed_tendons == 0 + assert view.max_spatial_tendons == 0 + + +class TestMockArticulationViewRootGetters: + """Tests for MockArticulationView root getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_root_transforms_shape(self, view): + """Test root transforms shape.""" + transforms = view.get_root_transforms() + assert transforms.shape == (4, 7) + + def test_get_root_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_root_transforms() + assert torch.allclose(transforms[:, 6], torch.ones(4)) # w = 1 + + def test_get_root_velocities_shape(self, view): + """Test root velocities shape.""" + velocities = view.get_root_velocities() + assert velocities.shape == (4, 6) + + +class TestMockArticulationViewLinkGetters: + """Tests for MockArticulationView link getter methods.""" + + 
@pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_link_transforms_shape(self, view): + """Test link transforms shape.""" + transforms = view.get_link_transforms() + assert transforms.shape == (4, 13, 7) + + def test_get_link_velocities_shape(self, view): + """Test link velocities shape.""" + velocities = view.get_link_velocities() + assert velocities.shape == (4, 13, 6) + + +class TestMockArticulationViewDOFGetters: + """Tests for MockArticulationView DOF getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_dof_positions_shape(self, view): + """Test DOF positions shape.""" + positions = view.get_dof_positions() + assert positions.shape == (4, 12) + + def test_get_dof_velocities_shape(self, view): + """Test DOF velocities shape.""" + velocities = view.get_dof_velocities() + assert velocities.shape == (4, 12) + + def test_get_dof_projected_joint_forces_shape(self, view): + """Test projected joint forces shape.""" + forces = view.get_dof_projected_joint_forces() + assert forces.shape == (4, 12) + + def test_get_dof_limits_shape(self, view): + """Test DOF limits shape.""" + limits = view.get_dof_limits() + assert limits.shape == (4, 12, 2) + + def test_get_dof_limits_default_values(self, view): + """Test that default limits are infinite.""" + limits = view.get_dof_limits() + assert torch.all(limits[:, :, 0] == float("-inf")) # lower + assert torch.all(limits[:, :, 1] == float("inf")) # upper + + def test_get_dof_stiffnesses_shape(self, view): + """Test DOF stiffnesses shape.""" + stiffnesses = view.get_dof_stiffnesses() + assert stiffnesses.shape == (4, 12) + + def test_get_dof_dampings_shape(self, view): + """Test DOF dampings shape.""" + dampings = view.get_dof_dampings() + assert dampings.shape == (4, 12) + + def test_get_dof_max_forces_shape(self, view): + """Test DOF max forces shape.""" + max_forces = view.get_dof_max_forces() + assert max_forces.shape == (4, 12) + + def test_get_dof_max_velocities_shape(self, view): + """Test DOF max velocities shape.""" + max_velocities = view.get_dof_max_velocities() + assert max_velocities.shape == (4, 12) + + def test_get_dof_armatures_shape(self, view): + """Test DOF armatures shape.""" + armatures = view.get_dof_armatures() + assert armatures.shape == (4, 12) + + def test_get_dof_friction_coefficients_shape(self, view): + """Test DOF friction coefficients shape.""" + friction = view.get_dof_friction_coefficients() + assert friction.shape == (4, 12) + + +class TestMockArticulationViewMassGetters: + """Tests for MockArticulationView mass property getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 13, 1) + + def test_get_coms_shape(self, view): + """Test centers of mass shape.""" + coms = view.get_coms() + assert coms.shape == (4, 13, 7) + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 13, 3, 3) + + +class TestMockArticulationViewSetters: + """Tests for MockArticulationView setter methods.""" + + 
@pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_root_transforms(self, view): + """Test setting root transforms.""" + new_data = torch.randn(4, 7) + view.set_root_transforms(new_data) + result = view.get_root_transforms() + assert torch.allclose(result, new_data) + + def test_set_dof_positions(self, view): + """Test setting DOF positions.""" + new_data = torch.randn(4, 12) + view.set_dof_positions(new_data) + result = view.get_dof_positions() + assert torch.allclose(result, new_data) + + def test_set_dof_positions_with_indices(self, view): + """Test setting DOF positions with indices.""" + new_data = torch.randn(2, 12) + indices = torch.tensor([0, 2]) + view.set_dof_positions(new_data, indices=indices) + result = view.get_dof_positions() + assert torch.allclose(result[0], new_data[0]) + assert torch.allclose(result[2], new_data[1]) + + def test_set_dof_velocities(self, view): + """Test setting DOF velocities.""" + new_data = torch.randn(4, 12) + view.set_dof_velocities(new_data) + result = view.get_dof_velocities() + assert torch.allclose(result, new_data) + + def test_set_dof_limits(self, view): + """Test setting DOF limits.""" + new_data = torch.randn(4, 12, 2) + view.set_dof_limits(new_data) + result = view.get_dof_limits() + assert torch.allclose(result, new_data) + + def test_set_dof_stiffnesses(self, view): + """Test setting DOF stiffnesses.""" + new_data = torch.randn(4, 12).abs() + view.set_dof_stiffnesses(new_data) + result = view.get_dof_stiffnesses() + assert torch.allclose(result, new_data) + + +class TestMockArticulationViewNoopSetters: + """Tests for MockArticulationView no-op setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs.""" + return MockArticulationView(count=4, num_dofs=12, device="cpu") + + def test_set_dof_position_targets_noop(self, view): + """Test that set_dof_position_targets is a no-op.""" + view.set_dof_position_targets(torch.randn(4, 12)) + + def test_set_dof_velocity_targets_noop(self, view): + """Test that set_dof_velocity_targets is a no-op.""" + view.set_dof_velocity_targets(torch.randn(4, 12)) + + def test_set_dof_actuation_forces_noop(self, view): + """Test that set_dof_actuation_forces is a no-op.""" + view.set_dof_actuation_forces(torch.randn(4, 12)) + + +class TestMockArticulationViewMockSetters: + """Tests for MockArticulationView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 12 DOFs, 13 links.""" + return MockArticulationView(count=4, num_dofs=12, num_links=13, device="cpu") + + def test_set_mock_root_transforms(self, view): + """Test mock root transform setter.""" + mock_data = torch.randn(4, 7) + view.set_mock_root_transforms(mock_data) + result = view.get_root_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_link_transforms(self, view): + """Test mock link transform setter.""" + mock_data = torch.randn(4, 13, 7) + view.set_mock_link_transforms(mock_data) + result = view.get_link_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_dof_positions(self, view): + """Test mock DOF position setter.""" + mock_data = torch.randn(4, 12) + view.set_mock_dof_positions(mock_data) + result = view.get_dof_positions() + assert torch.allclose(result, mock_data) + + def test_set_mock_dof_velocities(self, view): + """Test mock DOF velocity setter.""" + mock_data = 
torch.randn(4, 12) + view.set_mock_dof_velocities(mock_data) + result = view.get_dof_velocities() + assert torch.allclose(result, mock_data) + + +class TestMockArticulationViewFactories: + """Tests for articulation view factory functions.""" + + def test_create_mock_articulation_view_basic(self): + """Test basic factory usage.""" + view = create_mock_articulation_view(count=4, num_dofs=12, num_links=13) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + + def test_create_mock_quadruped_view(self): + """Test quadruped factory.""" + view = create_mock_quadruped_view(count=4) + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + assert view.shared_metatype.fixed_base is False + assert "FL_hip_joint" in view.shared_metatype.dof_names + assert "base" in view.shared_metatype.link_names + + def test_create_mock_humanoid_view(self): + """Test humanoid factory.""" + view = create_mock_humanoid_view(count=2) + assert view.count == 2 + assert view.shared_metatype.dof_count == 21 + assert view.shared_metatype.link_count == 22 + assert view.shared_metatype.fixed_base is False + assert "left_elbow" in view.shared_metatype.dof_names + assert "pelvis" in view.shared_metatype.link_names diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py new file mode 100644 index 00000000000..ad49b84bde9 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -0,0 +1,220 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidBodyView.""" + +import pytest +import torch + +# Note: we import the mock interfaces directly from within their own package to avoid routing through the +# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. 
+from mock_interfaces.views import MockRigidBodyView +from mock_interfaces.factories import create_mock_rigid_body_view + + +class TestMockRigidBodyViewInit: + """Tests for MockRigidBodyView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidBodyView() + assert view.count == 1 + assert len(view.prim_paths) == 1 + assert view._backend == "torch" + + def test_custom_count(self): + """Test initialization with custom count.""" + view = MockRigidBodyView(count=4) + assert view.count == 4 + assert len(view.prim_paths) == 4 + + def test_custom_prim_paths(self): + """Test initialization with custom prim paths.""" + paths = ["/World/Body_A", "/World/Body_B"] + view = MockRigidBodyView(count=2, prim_paths=paths) + assert view.prim_paths == paths + + +class TestMockRigidBodyViewGetters: + """Tests for MockRigidBodyView getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_get_transforms_shape(self, view): + """Test transforms shape.""" + transforms = view.get_transforms() + assert transforms.shape == (4, 7) + + def test_get_transforms_default_quaternion(self, view): + """Test that default quaternion is identity (xyzw format).""" + transforms = view.get_transforms() + # xyzw format: [x, y, z, w] = [0, 0, 0, 1] for identity + assert torch.allclose(transforms[:, 3:6], torch.zeros(4, 3)) # xyz = 0 + assert torch.allclose(transforms[:, 6], torch.ones(4)) # w = 1 + + def test_get_velocities_shape(self, view): + """Test velocities shape.""" + velocities = view.get_velocities() + assert velocities.shape == (4, 6) + + def test_get_accelerations_shape(self, view): + """Test accelerations shape.""" + accelerations = view.get_accelerations() + assert accelerations.shape == (4, 6) + + def test_get_masses_shape(self, view): + """Test masses shape.""" + masses = view.get_masses() + assert masses.shape == (4, 1, 1) + + def test_get_masses_default_value(self, view): + """Test that default mass is 1.""" + masses = view.get_masses() + assert torch.allclose(masses, torch.ones(4, 1, 1)) + + def test_get_coms_shape(self, view): + """Test centers of mass shape.""" + coms = view.get_coms() + assert coms.shape == (4, 1, 7) + + def test_get_inertias_shape(self, view): + """Test inertias shape.""" + inertias = view.get_inertias() + assert inertias.shape == (4, 1, 3, 3) + + def test_get_inertias_default_value(self, view): + """Test that default inertia is identity.""" + inertias = view.get_inertias() + expected = torch.eye(3).unsqueeze(0).unsqueeze(0).expand(4, 1, 3, 3) + assert torch.allclose(inertias, expected) + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + transforms1 = view.get_transforms() + transforms1[0, 0] = 999.0 + transforms2 = view.get_transforms() + assert transforms2[0, 0] != 999.0 + + +class TestMockRigidBodyViewSetters: + """Tests for MockRigidBodyView setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_set_transforms(self, view): + """Test setting transforms.""" + new_transforms = torch.randn(4, 7) + view.set_transforms(new_transforms) + result = view.get_transforms() + assert torch.allclose(result, new_transforms) + + def test_set_transforms_with_indices(self, view): + """Test setting transforms with indices.""" + new_transforms = torch.randn(2, 7) + indices = torch.tensor([0, 2]) + 
view.set_transforms(new_transforms, indices=indices) + result = view.get_transforms() + assert torch.allclose(result[0], new_transforms[0]) + assert torch.allclose(result[2], new_transforms[1]) + + def test_set_velocities(self, view): + """Test setting velocities.""" + new_velocities = torch.randn(4, 6) + view.set_velocities(new_velocities) + result = view.get_velocities() + assert torch.allclose(result, new_velocities) + + def test_set_masses(self, view): + """Test setting masses.""" + new_masses = torch.randn(4, 1, 1).abs() + view.set_masses(new_masses) + result = view.get_masses() + assert torch.allclose(result, new_masses) + + +class TestMockRigidBodyViewMockSetters: + """Tests for MockRigidBodyView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances.""" + return MockRigidBodyView(count=4, device="cpu") + + def test_set_mock_transforms(self, view): + """Test mock transform setter.""" + mock_data = torch.randn(4, 7) + view.set_mock_transforms(mock_data) + result = view.get_transforms() + assert torch.allclose(result, mock_data) + + def test_set_mock_velocities(self, view): + """Test mock velocity setter.""" + mock_data = torch.randn(4, 6) + view.set_mock_velocities(mock_data) + result = view.get_velocities() + assert torch.allclose(result, mock_data) + + def test_set_mock_accelerations(self, view): + """Test mock acceleration setter.""" + mock_data = torch.randn(4, 6) + view.set_mock_accelerations(mock_data) + result = view.get_accelerations() + assert torch.allclose(result, mock_data) + + def test_set_mock_masses(self, view): + """Test mock mass setter.""" + mock_data = torch.randn(4, 1, 1).abs() + view.set_mock_masses(mock_data) + result = view.get_masses() + assert torch.allclose(result, mock_data) + + def test_set_mock_coms(self, view): + """Test mock center of mass setter.""" + mock_data = torch.randn(4, 1, 7) + view.set_mock_coms(mock_data) + result = view.get_coms() + assert torch.allclose(result, mock_data) + + def test_set_mock_inertias(self, view): + """Test mock inertia setter.""" + mock_data = torch.randn(4, 1, 3, 3) + view.set_mock_inertias(mock_data) + result = view.get_inertias() + assert torch.allclose(result, mock_data) + + +class TestMockRigidBodyViewActions: + """Tests for MockRigidBodyView action methods.""" + + def test_apply_forces_and_torques_at_position_noop(self): + """Test that apply_forces_and_torques_at_position is a no-op.""" + view = MockRigidBodyView(count=4) + # Should not raise + view.apply_forces_and_torques_at_position( + forces=torch.randn(4, 3), + torques=torch.randn(4, 3), + positions=torch.randn(4, 3), + ) + + +class TestMockRigidBodyViewFactory: + """Tests for create_mock_rigid_body_view factory function.""" + + def test_factory_basic(self): + """Test basic factory usage.""" + view = create_mock_rigid_body_view(count=4) + assert view.count == 4 + + def test_factory_with_prim_paths(self): + """Test factory with custom prim paths.""" + paths = ["/World/A", "/World/B"] + view = create_mock_rigid_body_view(count=2, prim_paths=paths) + assert view.prim_paths == paths diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py new file mode 100644 index 00000000000..08c646a569e --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py @@ -0,0 +1,201 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for MockRigidContactView.""" + +import pytest +import torch + +# Note: we import the mock interfaces directly from within their own package to avoid routing through the +# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. +from mock_interfaces.views import MockRigidContactView +from mock_interfaces.factories import create_mock_rigid_contact_view + + +class TestMockRigidContactViewInit: + """Tests for MockRigidContactView initialization.""" + + def test_default_init(self): + """Test default initialization.""" + view = MockRigidContactView() + assert view.count == 1 + assert view.num_bodies == 1 + assert view.filter_count == 0 + + def test_custom_configuration(self): + """Test initialization with custom configuration.""" + view = MockRigidContactView( + count=4, + num_bodies=5, + filter_count=3, + ) + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + +class TestMockRigidContactViewGetters: + """Tests for MockRigidContactView getter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactView(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_get_net_contact_forces_shape(self, view): + """Test net contact forces shape.""" + forces = view.get_net_contact_forces(dt=0.01) + # Shape: (N*B, 3) = (4*5, 3) = (20, 3) + assert forces.shape == (20, 3) + + def test_get_contact_force_matrix_shape(self, view): + """Test contact force matrix shape.""" + matrix = view.get_contact_force_matrix(dt=0.01) + # Shape: (N*B, F, 3) = (4*5, 3, 3) = (20, 3, 3) + assert matrix.shape == (20, 3, 3) + + def test_get_contact_data_shapes(self, view): + """Test contact data tuple shapes.""" + data = view.get_contact_data(dt=0.01) + assert len(data) == 6 + positions, normals, impulses, separations, num_found, patch_id = data + + total_bodies = 4 * 5 # count * num_bodies + max_contacts = 16 # default + + assert positions.shape == (total_bodies, max_contacts, 3) + assert normals.shape == (total_bodies, max_contacts, 3) + assert impulses.shape == (total_bodies, max_contacts, 3) + assert separations.shape == (total_bodies, max_contacts) + assert num_found.shape == (total_bodies,) + assert patch_id.shape == (total_bodies, max_contacts) + + def test_get_friction_data_shapes(self, view): + """Test friction data tuple shapes.""" + data = view.get_friction_data(dt=0.01) + assert len(data) == 4 + forces, impulses, points, patch_id = data + + total_bodies = 4 * 5 + max_contacts = 16 + + assert forces.shape == (total_bodies, max_contacts, 3) + assert impulses.shape == (total_bodies, max_contacts, 3) + assert points.shape == (total_bodies, max_contacts, 3) + assert patch_id.shape == (total_bodies, max_contacts) + + def test_getters_return_clones(self, view): + """Test that getters return clones, not references.""" + forces1 = view.get_net_contact_forces(0.01) + forces1[0, 0] = 999.0 + forces2 = view.get_net_contact_forces(0.01) + assert forces2[0, 0] != 999.0 + + +class TestMockRigidContactViewMockSetters: + """Tests for MockRigidContactView mock setter methods.""" + + @pytest.fixture + def view(self): + """Create a view with 4 instances, 5 bodies, 3 filters.""" + return MockRigidContactView(count=4, num_bodies=5, filter_count=3, device="cpu") + + def test_set_mock_net_contact_forces(self, view): + """Test mock net contact forces setter.""" + mock_data = torch.randn(20, 3) + 
view.set_mock_net_contact_forces(mock_data) + result = view.get_net_contact_forces(0.01) + assert torch.allclose(result, mock_data) + + def test_set_mock_contact_force_matrix(self, view): + """Test mock contact force matrix setter.""" + mock_data = torch.randn(20, 3, 3) + view.set_mock_contact_force_matrix(mock_data) + result = view.get_contact_force_matrix(0.01) + assert torch.allclose(result, mock_data) + + def test_set_mock_contact_data_partial(self, view): + """Test setting partial contact data.""" + mock_positions = torch.randn(20, 16, 3) + mock_normals = torch.randn(20, 16, 3) + view.set_mock_contact_data(positions=mock_positions, normals=mock_normals) + + positions, normals, _, _, _, _ = view.get_contact_data(0.01) + assert torch.allclose(positions, mock_positions) + assert torch.allclose(normals, mock_normals) + + def test_set_mock_contact_data_full(self, view): + """Test setting full contact data.""" + total_bodies = 20 + max_contacts = 16 + + mock_positions = torch.randn(total_bodies, max_contacts, 3) + mock_normals = torch.randn(total_bodies, max_contacts, 3) + mock_impulses = torch.randn(total_bodies, max_contacts, 3) + mock_separations = torch.randn(total_bodies, max_contacts) + mock_num_found = torch.randint(0, max_contacts, (total_bodies,), dtype=torch.int32) + mock_patch_id = torch.randint(0, 10, (total_bodies, max_contacts), dtype=torch.int32) + + view.set_mock_contact_data( + positions=mock_positions, + normals=mock_normals, + impulses=mock_impulses, + separations=mock_separations, + num_found=mock_num_found, + patch_id=mock_patch_id, + ) + + positions, normals, impulses, separations, num_found, patch_id = view.get_contact_data(0.01) + + assert torch.allclose(positions, mock_positions) + assert torch.allclose(normals, mock_normals) + assert torch.allclose(impulses, mock_impulses) + assert torch.allclose(separations, mock_separations) + assert torch.equal(num_found, mock_num_found) + assert torch.equal(patch_id, mock_patch_id) + + def test_set_mock_friction_data(self, view): + """Test setting friction data.""" + total_bodies = 20 + max_contacts = 16 + + mock_forces = torch.randn(total_bodies, max_contacts, 3) + mock_impulses = torch.randn(total_bodies, max_contacts, 3) + + view.set_mock_friction_data(forces=mock_forces, impulses=mock_impulses) + + forces, impulses, _, _ = view.get_friction_data(0.01) + assert torch.allclose(forces, mock_forces) + assert torch.allclose(impulses, mock_impulses) + + +class TestMockRigidContactViewFactory: + """Tests for create_mock_rigid_contact_view factory function.""" + + def test_factory_basic(self): + """Test basic factory usage.""" + view = create_mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3) + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + def test_factory_custom_max_contacts(self): + """Test factory with custom max contact data count.""" + view = create_mock_rigid_contact_view( + count=2, num_bodies=3, max_contact_data_count=32 + ) + _, _, _, separations, _, _ = view.get_contact_data(0.01) + assert separations.shape[1] == 32 + + +class TestMockRigidContactViewZeroFilterCount: + """Tests for MockRigidContactView with zero filter count.""" + + def test_contact_force_matrix_zero_filters(self): + """Test contact force matrix with zero filters.""" + view = MockRigidContactView(count=4, num_bodies=5, filter_count=0) + matrix = view.get_contact_force_matrix(0.01) + # Shape: (N*B, F, 3) = (20, 0, 3) + assert matrix.shape == (20, 0, 3) diff --git 
a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py new file mode 100644 index 00000000000..29248ce4c13 --- /dev/null +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py @@ -0,0 +1,189 @@ +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Tests for patching utilities.""" + +import torch + +# Note: we import the mock interfaces directly from within their own package to avoid routing through the +# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. +from mock_interfaces.views import ( + MockArticulationView, + MockRigidBodyView, + MockRigidContactView, +) +from mock_interfaces.utils import ( + mock_articulation_view, + mock_rigid_body_view, + mock_rigid_contact_view, + patch_articulation_view, + patch_rigid_body_view, + patch_rigid_contact_view, +) + + +class TestPatchRigidBodyView: + """Tests for patch_rigid_body_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality. + + Note: Patching works when the target module imports the class dynamically. + In tests where we import at module level, we need to import inside the + context to get the patched version. + """ + target = "mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + with patch_rigid_body_view(target, count=4): + # Import inside context to get patched version + from mock_interfaces.views import mock_rigid_body_view + + view = mock_rigid_body_view.MockRigidBodyView() + assert view.count == 4 + + def test_patching_preserves_configuration(self): + """Test that patching preserves configuration.""" + target = "mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + prim_paths = ["/World/A", "/World/B", "/World/C", "/World/D"] + with patch_rigid_body_view(target, count=4, prim_paths=prim_paths): + from mock_interfaces.views import mock_rigid_body_view + + view = mock_rigid_body_view.MockRigidBodyView() + assert view.prim_paths == prim_paths + + +class TestPatchArticulationView: + """Tests for patch_articulation_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality.""" + target = "mock_interfaces.views.mock_articulation_view.MockArticulationView" + with patch_articulation_view(target, count=4, num_dofs=12, num_links=13): + from mock_interfaces.views import mock_articulation_view + + view = mock_articulation_view.MockArticulationView() + assert view.count == 4 + assert view.shared_metatype.dof_count == 12 + assert view.shared_metatype.link_count == 13 + + def test_patching_with_names(self): + """Test patching with custom names.""" + target = "mock_interfaces.views.mock_articulation_view.MockArticulationView" + dof_names = ["joint_a", "joint_b"] + with patch_articulation_view(target, num_dofs=2, dof_names=dof_names): + from mock_interfaces.views import mock_articulation_view + + view = mock_articulation_view.MockArticulationView() + assert view.shared_metatype.dof_names == dof_names + + +class TestPatchRigidContactView: + """Tests for patch_rigid_contact_view context manager.""" + + def test_basic_patching(self): + """Test basic patching functionality.""" + target = "mock_interfaces.views.mock_rigid_contact_view.MockRigidContactView" + with patch_rigid_contact_view(target, count=4, num_bodies=5, filter_count=3): + from mock_interfaces.views import mock_rigid_contact_view + + view = 
mock_rigid_contact_view.MockRigidContactView() + assert view.count == 4 + assert view.num_bodies == 5 + assert view.filter_count == 3 + + +class TestDecoratorUtilities: + """Tests for decorator utilities. + + Note: These decorators are designed for wrapping functions, not pytest tests. + Pytest inspects function signatures for fixtures, which conflicts with our + decorator pattern. We test them by manually invoking the decorated functions. + """ + + def test_mock_rigid_body_view_decorator(self): + """Test mock_rigid_body_view decorator injects mock view.""" + + @mock_rigid_body_view(count=4) + def my_function(view): + return view.count, view.get_transforms().shape + + count, shape = my_function() + assert count == 4 + assert shape == (4, 7) + + def test_mock_articulation_view_decorator(self): + """Test mock_articulation_view decorator injects mock view.""" + + @mock_articulation_view(count=4, num_dofs=12, num_links=13) + def my_function(view): + return ( + view.count, + view.shared_metatype.dof_count, + view.get_dof_positions().shape, + ) + + count, dof_count, shape = my_function() + assert count == 4 + assert dof_count == 12 + assert shape == (4, 12) + + def test_mock_rigid_contact_view_decorator(self): + """Test mock_rigid_contact_view decorator injects mock view.""" + + @mock_rigid_contact_view(count=4, num_bodies=5, filter_count=3) + def my_function(view): + return ( + view.count, + view.num_bodies, + view.get_net_contact_forces(0.01).shape, + ) + + count, num_bodies, shape = my_function() + assert count == 4 + assert num_bodies == 5 + assert shape == (20, 3) # 4 * 5 = 20 + + def test_decorator_with_prim_paths(self): + """Test decorator with custom prim paths.""" + + @mock_rigid_body_view(count=2, prim_paths=["/World/A", "/World/B"]) + def my_function(view): + return view.prim_paths + + paths = my_function() + assert paths == ["/World/A", "/World/B"] + + def test_decorator_with_fixed_base(self): + """Test decorator with fixed base.""" + + @mock_articulation_view(count=2, num_dofs=6, fixed_base=True) + def my_function(view): + return view.shared_metatype.fixed_base + + fixed_base = my_function() + assert fixed_base is True + + +class TestDecoratorFunctionPreservation: + """Tests that decorators preserve function metadata.""" + + def test_function_name_preserved(self): + """Test that function name is preserved.""" + + @mock_rigid_body_view(count=4) + def my_documented_function(view): + """This is a documented function.""" + pass + + assert my_documented_function.__name__ == "my_documented_function" + + def test_docstring_preserved(self): + """Test that docstring is preserved.""" + + @mock_rigid_body_view(count=4) + def my_documented_function(view): + """This is a documented function.""" + pass + + assert "documented function" in my_documented_function.__doc__ From 07708fb484a352c7034f71fedd6366e83fed53ec Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 16:04:37 +0100 Subject: [PATCH 24/38] cleaning up --- .../isaaclab_physx/isaaclab_physx/__init__.py | 3 - .../assets/articulation/articulation.py | 2 +- .../test/test_mock_interfaces/conftest.py | 62 ------------------- .../test_mock_articulation_view.py | 6 +- .../test_mock_rigid_body_view.py | 6 +- .../test_mock_rigid_contact_view.py | 6 +- .../test_mock_interfaces/test_patching.py | 26 ++++---- 7 files changed, 19 insertions(+), 92 deletions(-) delete mode 100644 source/isaaclab_physx/test/test_mock_interfaces/conftest.py diff --git a/source/isaaclab_physx/isaaclab_physx/__init__.py 
b/source/isaaclab_physx/isaaclab_physx/__init__.py index 69bbf740a59..68a8a553287 100644 --- a/source/isaaclab_physx/isaaclab_physx/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/__init__.py @@ -17,6 +17,3 @@ # Configure the module-level variables __version__ = ISAACLAB_PHYSX_METADATA["package"]["version"] - -# Import sensors module for auto-registration with factory -from . import sensors # noqa: F401, E402 diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index b72cebcd9e9..ef4cbc38aa2 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -166,7 +166,7 @@ def body_names(self) -> list[str]: return self.root_view.shared_metatype.link_names @property - def root_view(self): + def root_view(self) -> physx.ArticulationView: """Root view for the asset. .. note:: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/conftest.py b/source/isaaclab_physx/test/test_mock_interfaces/conftest.py deleted file mode 100644 index 056274d4f36..00000000000 --- a/source/isaaclab_physx/test/test_mock_interfaces/conftest.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -"""Pytest configuration for mock interfaces tests. - -This conftest sets up imports to allow testing the mock interfaces without -requiring Isaac Sim or the full isaaclab_physx package. - -The mock interfaces are designed to be importable without Isaac Sim, so we -pre-load them into sys.modules to avoid triggering the full isaaclab_physx -package initialization. 
-""" - -import sys -from pathlib import Path - -# Get the path to the mock_interfaces module -_TEST_DIR = Path(__file__).parent -_MOCK_INTERFACES_DIR = _TEST_DIR.parent.parent / "isaaclab_physx" / "test" / "mock_interfaces" - -# Add paths for direct imports -if str(_MOCK_INTERFACES_DIR) not in sys.path: - sys.path.insert(0, str(_MOCK_INTERFACES_DIR)) -if str(_MOCK_INTERFACES_DIR.parent) not in sys.path: - sys.path.insert(0, str(_MOCK_INTERFACES_DIR.parent)) - -# Pre-load mock_interfaces modules to prevent isaaclab_physx from being imported -# This allows tests to use `from isaaclab_physx.test.mock_interfaces import ...` -# without triggering the full package (which requires Isaac Sim) -import mock_interfaces -import mock_interfaces.views -import mock_interfaces.utils -import mock_interfaces.factories -import mock_interfaces.views.mock_rigid_body_view -import mock_interfaces.views.mock_articulation_view -import mock_interfaces.views.mock_rigid_contact_view -import mock_interfaces.utils.mock_shared_metatype -import mock_interfaces.utils.patching - -# Create fake module entries in sys.modules for the isaaclab_physx path -# This makes `from isaaclab_physx.test.mock_interfaces import ...` work -sys.modules["isaaclab_physx"] = type(sys)("isaaclab_physx") -sys.modules["isaaclab_physx.test"] = type(sys)("isaaclab_physx.test") -sys.modules["isaaclab_physx.test.mock_interfaces"] = mock_interfaces -sys.modules["isaaclab_physx.test.mock_interfaces.views"] = mock_interfaces.views -sys.modules["isaaclab_physx.test.mock_interfaces.utils"] = mock_interfaces.utils -sys.modules["isaaclab_physx.test.mock_interfaces.factories"] = mock_interfaces.factories -sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view"] = ( - mock_interfaces.views.mock_rigid_body_view -) -sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_articulation_view"] = ( - mock_interfaces.views.mock_articulation_view -) -sys.modules["isaaclab_physx.test.mock_interfaces.views.mock_rigid_contact_view"] = ( - mock_interfaces.views.mock_rigid_contact_view -) -sys.modules["isaaclab_physx.test.mock_interfaces.utils.mock_shared_metatype"] = ( - mock_interfaces.utils.mock_shared_metatype -) -sys.modules["isaaclab_physx.test.mock_interfaces.utils.patching"] = mock_interfaces.utils.patching diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py index cb65e28bcac..c852133c279 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -8,10 +8,8 @@ import pytest import torch -# Note: we import the mock interfaces directly from within their own package to avoid routing through the -# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. 
-from mock_interfaces.views import MockArticulationView -from mock_interfaces.factories import ( +from isaaclab_physx.test.mock_interfaces.views import MockArticulationView +from isaaclab_physx.test.mock_interfaces.factories import ( create_mock_articulation_view, create_mock_humanoid_view, create_mock_quadruped_view, diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py index ad49b84bde9..bf2f3e2e272 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -8,10 +8,8 @@ import pytest import torch -# Note: we import the mock interfaces directly from within their own package to avoid routing through the -# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. -from mock_interfaces.views import MockRigidBodyView -from mock_interfaces.factories import create_mock_rigid_body_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyView +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_body_view class TestMockRigidBodyViewInit: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py index 08c646a569e..91c9dee6d03 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py @@ -8,10 +8,8 @@ import pytest import torch -# Note: we import the mock interfaces directly from within their own package to avoid routing through the -# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. -from mock_interfaces.views import MockRigidContactView -from mock_interfaces.factories import create_mock_rigid_contact_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidContactView +from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_contact_view class TestMockRigidContactViewInit: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py index 29248ce4c13..37680cb9f07 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py @@ -7,14 +7,12 @@ import torch -# Note: we import the mock interfaces directly from within their own package to avoid routing through the -# isaaclab_physx package. This allows us to test the mock interfaces without requiring Isaac Sim or GPU simulation. -from mock_interfaces.views import ( +from isaaclab_physx.test.mock_interfaces.views import ( MockArticulationView, MockRigidBodyView, MockRigidContactView, ) -from mock_interfaces.utils import ( +from isaaclab_physx.test.mock_interfaces.utils import ( mock_articulation_view, mock_rigid_body_view, mock_rigid_contact_view, @@ -34,20 +32,20 @@ def test_basic_patching(self): In tests where we import at module level, we need to import inside the context to get the patched version. 
""" - target = "mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" with patch_rigid_body_view(target, count=4): # Import inside context to get patched version - from mock_interfaces.views import mock_rigid_body_view + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_body_view view = mock_rigid_body_view.MockRigidBodyView() assert view.count == 4 def test_patching_preserves_configuration(self): """Test that patching preserves configuration.""" - target = "mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_body_view.MockRigidBodyView" prim_paths = ["/World/A", "/World/B", "/World/C", "/World/D"] with patch_rigid_body_view(target, count=4, prim_paths=prim_paths): - from mock_interfaces.views import mock_rigid_body_view + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_body_view view = mock_rigid_body_view.MockRigidBodyView() assert view.prim_paths == prim_paths @@ -58,9 +56,9 @@ class TestPatchArticulationView: def test_basic_patching(self): """Test basic patching functionality.""" - target = "mock_interfaces.views.mock_articulation_view.MockArticulationView" + target = "isaaclab_physx.test.mock_interfaces.views.mock_articulation_view.MockArticulationView" with patch_articulation_view(target, count=4, num_dofs=12, num_links=13): - from mock_interfaces.views import mock_articulation_view + from isaaclab_physx.test.mock_interfaces.views import mock_articulation_view view = mock_articulation_view.MockArticulationView() assert view.count == 4 @@ -69,10 +67,10 @@ def test_basic_patching(self): def test_patching_with_names(self): """Test patching with custom names.""" - target = "mock_interfaces.views.mock_articulation_view.MockArticulationView" + target = "isaaclab_physx.test.mock_interfaces.views.mock_articulation_view.MockArticulationView" dof_names = ["joint_a", "joint_b"] with patch_articulation_view(target, num_dofs=2, dof_names=dof_names): - from mock_interfaces.views import mock_articulation_view + from isaaclab_physx.test.mock_interfaces.views import mock_articulation_view view = mock_articulation_view.MockArticulationView() assert view.shared_metatype.dof_names == dof_names @@ -83,9 +81,9 @@ class TestPatchRigidContactView: def test_basic_patching(self): """Test basic patching functionality.""" - target = "mock_interfaces.views.mock_rigid_contact_view.MockRigidContactView" + target = "isaaclab_physx.test.mock_interfaces.views.mock_rigid_contact_view.MockRigidContactView" with patch_rigid_contact_view(target, count=4, num_bodies=5, filter_count=3): - from mock_interfaces.views import mock_rigid_contact_view + from isaaclab_physx.test.mock_interfaces.views import mock_rigid_contact_view view = mock_rigid_contact_view.MockRigidContactView() assert view.count == 4 From e7af2cea1e8f37345349393b6ca4823f9b718f55 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Thu, 29 Jan 2026 16:06:56 +0100 Subject: [PATCH 25/38] pre-commits --- .../isaaclab/test/mock_interfaces/__init__.py | 5 ++ .../test/mock_interfaces/assets/__init__.py | 5 ++ .../test/mock_interfaces/assets/factories.py | 52 +++++++++++++---- .../assets/mock_articulation.py | 24 ++++---- .../assets/mock_rigid_object.py | 12 ++-- .../assets/mock_rigid_object_collection.py | 8 ++- .../test/mock_interfaces/sensors/__init__.py | 5 ++ .../test/mock_interfaces/sensors/factories.py | 9 ++- .../sensors/mock_contact_sensor.py | 32 
+++++----- .../sensors/mock_frame_transformer.py | 16 ++--- .../test/mock_interfaces/sensors/mock_imu.py | 9 ++- .../test/mock_interfaces/utils/__init__.py | 5 ++ .../mock_interfaces/utils/mock_generator.py | 58 ++++++++++--------- .../test/mock_interfaces/utils/patching.py | 20 +++---- .../test_mock_interfaces/test_mock_assets.py | 10 +++- .../test_mock_data_properties.py | 16 +++-- .../test_mock_interfaces/test_mock_sensors.py | 13 +++-- .../isaaclab_physx/test/__init__.py | 5 ++ .../test/mock_interfaces/__init__.py | 5 ++ .../test/mock_interfaces/factories.py | 5 ++ .../test/mock_interfaces/utils/__init__.py | 5 ++ .../utils/mock_shared_metatype.py | 5 ++ .../test/mock_interfaces/utils/patching.py | 8 ++- .../test/mock_interfaces/views/__init__.py | 5 ++ .../views/mock_articulation_view.py | 49 +++++----------- .../views/mock_rigid_body_view.py | 5 ++ .../views/mock_rigid_contact_view.py | 45 +++++--------- .../test/test_mock_interfaces/__init__.py | 5 ++ .../test_mock_articulation_view.py | 8 ++- .../test_mock_rigid_body_view.py | 8 ++- .../test_mock_rigid_contact_view.py | 12 ++-- .../test_mock_interfaces/test_patching.py | 12 ++-- 32 files changed, 288 insertions(+), 193 deletions(-) diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py index 91a45c39692..271765d769d 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py index 42a1b2f7705..2dce1f8450a 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py index a04cf7b699f..697a8706eba 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -82,7 +87,7 @@ def create_mock_quadruped( # Set reasonable default joint limits for a quadruped joint_pos_limits = torch.zeros(num_instances, 12, 2, device=device) joint_pos_limits[..., 0] = -1.57 # Lower limit - joint_pos_limits[..., 1] = 1.57 # Upper limit + joint_pos_limits[..., 1] = 1.57 # Upper limit robot.data.set_joint_pos_limits(joint_pos_limits) return robot @@ -106,23 +111,48 @@ def create_mock_humanoid( # Simplified humanoid joint structure joint_names = [ # Torso - "torso_yaw", "torso_pitch", "torso_roll", + "torso_yaw", + "torso_pitch", + "torso_roll", # Left arm - "L_shoulder_pitch", "L_shoulder_roll", "L_shoulder_yaw", "L_elbow", + "L_shoulder_pitch", + "L_shoulder_roll", + "L_shoulder_yaw", + "L_elbow", # Right arm - "R_shoulder_pitch", "R_shoulder_roll", "R_shoulder_yaw", "R_elbow", + "R_shoulder_pitch", + "R_shoulder_roll", + "R_shoulder_yaw", + "R_elbow", # Left leg - "L_hip_yaw", "L_hip_roll", "L_hip_pitch", "L_knee", "L_ankle_pitch", + "L_hip_yaw", + "L_hip_roll", + "L_hip_pitch", + "L_knee", + "L_ankle_pitch", # Right leg - "R_hip_yaw", "R_hip_roll", "R_hip_pitch", "R_knee", "R_ankle_pitch", + "R_hip_yaw", + "R_hip_roll", + "R_hip_pitch", + "R_knee", + "R_ankle_pitch", ] body_names = [ - "pelvis", "torso", - "L_upper_arm", "L_lower_arm", "L_hand", - "R_upper_arm", "R_lower_arm", "R_hand", - "L_thigh", "L_shin", "L_foot", - "R_thigh", "R_shin", "R_foot", + "pelvis", + "torso", + "L_upper_arm", + "L_lower_arm", + "L_hand", + "R_upper_arm", + "R_lower_arm", + "R_hand", + "L_thigh", + "L_shin", + "L_foot", + "R_thigh", + "R_shin", + "R_foot", "head", ] diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py index bb45770019c..fc9a8619698 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -8,8 +13,9 @@ from __future__ import annotations import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockArticulationData: @@ -1143,9 +1149,7 @@ def update(self, dt: float) -> None: # -- Finder methods -- - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies by name regex patterns.""" return self._find_by_regex(self._body_names, name_keys, preserve_order) @@ -1457,9 +1461,7 @@ def set_joint_position_target( if joint_ids is None: joint_ids = slice(None) if self._data._joint_pos_target is None: - self._data._joint_pos_target = torch.zeros( - self._num_instances, self._num_joints, device=self._device - ) + self._data._joint_pos_target = torch.zeros(self._num_instances, self._num_joints, device=self._device) self._data._joint_pos_target[env_ids, joint_ids] = target.to(self._device) def set_joint_velocity_target( @@ -1474,9 +1476,7 @@ def set_joint_velocity_target( if joint_ids is None: joint_ids = slice(None) if self._data._joint_vel_target is None: - self._data._joint_vel_target = torch.zeros( - self._num_instances, self._num_joints, device=self._device - ) + self._data._joint_vel_target = torch.zeros(self._num_instances, self._num_joints, device=self._device) self._data._joint_vel_target[env_ids, joint_ids] = target.to(self._device) def set_joint_effort_target( @@ -1491,9 +1491,7 @@ def set_joint_effort_target( if joint_ids is None: joint_ids = slice(None) if self._data._joint_effort_target is None: - self._data._joint_effort_target = torch.zeros( - self._num_instances, self._num_joints, device=self._device - ) + self._data._joint_effort_target = torch.zeros(self._num_instances, self._num_joints, device=self._device) self._data._joint_effort_target[env_ids, joint_ids] = target.to(self._device) # -- Tendon methods (fixed) -- diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py index 7c901996a33..ec6d6ea45c3 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -8,8 +13,9 @@ from __future__ import annotations import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockRigidObjectData: @@ -616,9 +622,7 @@ def update(self, dt: float) -> None: # -- Finder methods -- - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies by name regex patterns.""" if isinstance(name_keys, str): name_keys = [name_keys] diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py index eaf636b50cb..5ccac7951f1 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -8,8 +13,9 @@ from __future__ import annotations import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockRigidObjectCollectionData: diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py index dadef613a91..f99b9347f2d 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py index 1a9419ab4ce..8268928af3a 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -30,9 +35,7 @@ def create_mock_imu( Configured MockImu instance. 
""" imu = MockImu(num_instances=num_instances, device=device) - imu.data.set_projected_gravity_b( - torch.tensor([gravity], device=device).expand(num_instances, -1).clone() - ) + imu.data.set_projected_gravity_b(torch.tensor([gravity], device=device).expand(num_instances, -1).clone()) return imu diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py index af7ca0fbd7d..5cc51d8afde 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -8,8 +13,9 @@ from __future__ import annotations import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockContactSensorData: @@ -93,9 +99,7 @@ def net_forces_w_history(self) -> torch.Tensor | None: if self._history_length == 0: return None if self._net_forces_w_history is None: - return torch.zeros( - self._num_instances, self._history_length, self._num_bodies, 3, device=self._device - ) + return torch.zeros(self._num_instances, self._history_length, self._num_bodies, 3, device=self._device) return self._net_forces_w_history @property @@ -104,9 +108,7 @@ def force_matrix_w(self) -> torch.Tensor | None: if self._num_filter_bodies == 0: return None if self._force_matrix_w is None: - return torch.zeros( - self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device - ) + return torch.zeros(self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device) return self._force_matrix_w @property @@ -131,9 +133,7 @@ def contact_pos_w(self) -> torch.Tensor | None: if self._num_filter_bodies == 0: return None if self._contact_pos_w is None: - return torch.zeros( - self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device - ) + return torch.zeros(self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device) return self._contact_pos_w @property @@ -142,9 +142,7 @@ def friction_forces_w(self) -> torch.Tensor | None: if self._num_filter_bodies == 0: return None if self._friction_forces_w is None: - return torch.zeros( - self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device - ) + return torch.zeros(self._num_instances, self._num_bodies, self._num_filter_bodies, 3, device=self._device) return self._friction_forces_w @property @@ -312,9 +310,7 @@ def __init__( self._num_bodies = num_bodies self._body_names = body_names or [f"body_{i}" for i in range(num_bodies)] self._device = device - self._data = MockContactSensorData( - num_instances, num_bodies, device, history_length, num_filter_bodies - ) + self._data = MockContactSensorData(num_instances, num_bodies, device, history_length, num_filter_bodies) # -- Properties -- @@ -350,9 +346,7 @@ def device(self) -> str: # -- Methods -- - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find bodies by name regex patterns. 
Args: diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py index a7661d37c74..7b1b9081b06 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -8,8 +13,9 @@ from __future__ import annotations import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockFrameTransformerData: @@ -202,9 +208,7 @@ def __init__( self._num_target_frames = num_target_frames self._target_frame_names = target_frame_names or [f"frame_{i}" for i in range(num_target_frames)] self._device = device - self._data = MockFrameTransformerData( - num_instances, num_target_frames, self._target_frame_names, device - ) + self._data = MockFrameTransformerData(num_instances, num_target_frames, self._target_frame_names, device) # -- Properties -- @@ -235,9 +239,7 @@ def device(self) -> str: # -- Methods -- - def find_bodies( - self, name_keys: str | Sequence[str], preserve_order: bool = False - ) -> tuple[list[int], list[str]]: + def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = False) -> tuple[list[int], list[str]]: """Find target frames by name regex patterns. Args: diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py index 66e663a3c8d..202465211ca 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -7,9 +12,9 @@ from __future__ import annotations -import re +from collections.abc import Sequence + import torch -from typing import Sequence class MockImuData: diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py index a8cfc933e97..b8cb3c38a10 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py index d18a6b6283e..f9bc6fb246d 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -7,9 +12,10 @@ from __future__ import annotations -import torch from typing import TYPE_CHECKING +import torch + if TYPE_CHECKING: from ..assets import MockArticulation from ..sensors import MockContactSensor, MockFrameTransformer, MockImu @@ -19,12 +25,14 @@ class MockArticulationBuilder: """Builder class for creating custom MockArticulation instances. Example: - >>> robot = (MockArticulationBuilder() + >>> robot = ( + ... MockArticulationBuilder() ... .with_joints(["hip", "knee", "ankle"], default_pos=[0.0, 0.5, -0.5]) ... .with_bodies(["base", "thigh", "shin", "foot"]) ... .with_fixed_base(True) ... .with_num_instances(8) - ... .build()) + ... .build() + ... ) """ def __init__(self): @@ -42,7 +50,7 @@ def __init__(self): self._fixed_tendon_names: list[str] | None = None self._spatial_tendon_names: list[str] | None = None - def with_num_instances(self, num_instances: int) -> "MockArticulationBuilder": + def with_num_instances(self, num_instances: int) -> MockArticulationBuilder: """Set the number of articulation instances. Args: @@ -59,7 +67,7 @@ def with_joints( joint_names: list[str], default_pos: list[float] | None = None, default_vel: list[float] | None = None, - ) -> "MockArticulationBuilder": + ) -> MockArticulationBuilder: """Set joint configuration. Args: @@ -75,7 +83,7 @@ def with_joints( self._default_joint_vel = default_vel return self - def with_bodies(self, body_names: list[str]) -> "MockArticulationBuilder": + def with_bodies(self, body_names: list[str]) -> MockArticulationBuilder: """Set body configuration. Args: @@ -87,7 +95,7 @@ def with_bodies(self, body_names: list[str]) -> "MockArticulationBuilder": self._body_names = body_names return self - def with_fixed_base(self, is_fixed: bool) -> "MockArticulationBuilder": + def with_fixed_base(self, is_fixed: bool) -> MockArticulationBuilder: """Set whether the articulation has a fixed base. Args: @@ -99,7 +107,7 @@ def with_fixed_base(self, is_fixed: bool) -> "MockArticulationBuilder": self._is_fixed_base = is_fixed return self - def with_device(self, device: str) -> "MockArticulationBuilder": + def with_device(self, device: str) -> MockArticulationBuilder: """Set the device for tensor allocation. Args: @@ -111,9 +119,7 @@ def with_device(self, device: str) -> "MockArticulationBuilder": self._device = device return self - def with_joint_limits( - self, lower: float, upper: float - ) -> "MockArticulationBuilder": + def with_joint_limits(self, lower: float, upper: float) -> MockArticulationBuilder: """Set uniform joint position limits for all joints. Args: @@ -126,9 +132,7 @@ def with_joint_limits( self._joint_pos_limits = (lower, upper) return self - def with_fixed_tendons( - self, tendon_names: list[str] - ) -> "MockArticulationBuilder": + def with_fixed_tendons(self, tendon_names: list[str]) -> MockArticulationBuilder: """Set fixed tendon configuration. 
Args: @@ -141,9 +145,7 @@ def with_fixed_tendons( self._num_fixed_tendons = len(tendon_names) return self - def with_spatial_tendons( - self, tendon_names: list[str] - ) -> "MockArticulationBuilder": + def with_spatial_tendons(self, tendon_names: list[str]) -> MockArticulationBuilder: """Set spatial tendon configuration. Args: @@ -156,7 +158,7 @@ def with_spatial_tendons( self._num_spatial_tendons = len(tendon_names) return self - def build(self) -> "MockArticulation": + def build(self) -> MockArticulation: """Build the MockArticulation instance. Returns: @@ -213,11 +215,13 @@ class MockSensorBuilder: """Builder class for creating custom mock sensor instances. Example: - >>> sensor = (MockSensorBuilder("contact") + >>> sensor = ( + ... MockSensorBuilder("contact") ... .with_num_instances(4) ... .with_bodies(["FL_foot", "FR_foot", "RL_foot", "RR_foot"]) ... .with_device("cuda") - ... .build()) + ... .build() + ... ) """ def __init__(self, sensor_type: str): @@ -240,7 +244,7 @@ def __init__(self, sensor_type: str): # Frame transformer specific self._target_frame_names: list[str] = [] - def with_num_instances(self, num_instances: int) -> "MockSensorBuilder": + def with_num_instances(self, num_instances: int) -> MockSensorBuilder: """Set the number of sensor instances. Args: @@ -252,7 +256,7 @@ def with_num_instances(self, num_instances: int) -> "MockSensorBuilder": self._num_instances = num_instances return self - def with_device(self, device: str) -> "MockSensorBuilder": + def with_device(self, device: str) -> MockSensorBuilder: """Set the device for tensor allocation. Args: @@ -264,7 +268,7 @@ def with_device(self, device: str) -> "MockSensorBuilder": self._device = device return self - def with_bodies(self, body_names: list[str]) -> "MockSensorBuilder": + def with_bodies(self, body_names: list[str]) -> MockSensorBuilder: """Set body names (for contact sensor). Args: @@ -276,7 +280,7 @@ def with_bodies(self, body_names: list[str]) -> "MockSensorBuilder": self._body_names = body_names return self - def with_history_length(self, length: int) -> "MockSensorBuilder": + def with_history_length(self, length: int) -> MockSensorBuilder: """Set history buffer length (for contact sensor). Args: @@ -288,7 +292,7 @@ def with_history_length(self, length: int) -> "MockSensorBuilder": self._history_length = length return self - def with_filter_bodies(self, num_filter_bodies: int) -> "MockSensorBuilder": + def with_filter_bodies(self, num_filter_bodies: int) -> MockSensorBuilder: """Set number of filter bodies (for contact sensor). Args: @@ -300,7 +304,7 @@ def with_filter_bodies(self, num_filter_bodies: int) -> "MockSensorBuilder": self._num_filter_bodies = num_filter_bodies return self - def with_target_frames(self, frame_names: list[str]) -> "MockSensorBuilder": + def with_target_frames(self, frame_names: list[str]) -> MockSensorBuilder: """Set target frame names (for frame transformer). Args: @@ -312,7 +316,7 @@ def with_target_frames(self, frame_names: list[str]) -> "MockSensorBuilder": self._target_frame_names = frame_names return self - def build(self) -> "MockContactSensor | MockImu | MockFrameTransformer": + def build(self) -> MockContactSensor | MockImu | MockFrameTransformer: """Build the mock sensor instance. 
Returns: diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py index cc44c316530..4dcb5038bb0 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -8,8 +13,9 @@ from __future__ import annotations import functools +from collections.abc import Callable, Generator from contextlib import contextmanager -from typing import Any, Callable, Generator, TypeVar +from typing import Any, TypeVar from unittest.mock import patch F = TypeVar("F", bound=Callable[..., Any]) @@ -104,9 +110,7 @@ def create_mock(*args: Any, **create_kwargs: Any) -> MockContactSensor: body_names=create_kwargs.get("body_names", kwargs.get("body_names")), device=create_kwargs.get("device", device), history_length=create_kwargs.get("history_length", kwargs.get("history_length", 0)), - num_filter_bodies=create_kwargs.get( - "num_filter_bodies", kwargs.get("num_filter_bodies", 0) - ), + num_filter_bodies=create_kwargs.get("num_filter_bodies", kwargs.get("num_filter_bodies", 0)), ) elif sensor_type == "imu": @@ -124,12 +128,8 @@ def create_mock(*args: Any, **create_kwargs: Any) -> MockImu: def create_mock(*args: Any, **create_kwargs: Any) -> MockFrameTransformer: return MockFrameTransformer( num_instances=create_kwargs.get("num_instances", num_instances), - num_target_frames=create_kwargs.get( - "num_target_frames", kwargs.get("num_target_frames", 1) - ), - target_frame_names=create_kwargs.get( - "target_frame_names", kwargs.get("target_frame_names") - ), + num_target_frames=create_kwargs.get("num_target_frames", kwargs.get("num_target_frames", 1)), + target_frame_names=create_kwargs.get("target_frame_names", kwargs.get("target_frame_names")), device=create_kwargs.get("device", device), ) diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py index 9718a15aaea..7eac32396f4 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -13,13 +18,12 @@ MockRigidObject, MockRigidObjectCollection, create_mock_articulation, - create_mock_quadruped, create_mock_humanoid, + create_mock_quadruped, create_mock_rigid_object, create_mock_rigid_object_collection, ) -from isaaclab.test.mock_interfaces.utils import MockArticulationBuilder, MockSensorBuilder - +from isaaclab.test.mock_interfaces.utils import MockArticulationBuilder # ============================================================================== # MockArticulation Tests diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py index c76dadbbbb2..628a42c2d2a 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -9,23 +14,16 @@ import torch from isaaclab.test.mock_interfaces.assets import ( - MockArticulation, MockArticulationData, - MockRigidObject, - MockRigidObjectData, - MockRigidObjectCollection, MockRigidObjectCollectionData, + MockRigidObjectData, ) from isaaclab.test.mock_interfaces.sensors import ( - MockImu, - MockImuData, - MockContactSensor, MockContactSensorData, - MockFrameTransformer, MockFrameTransformerData, + MockImuData, ) - # ============================================================================== # IMU Data Property Tests # ============================================================================== diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py index db8165130d3..88a3b4db6f3 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -8,16 +13,16 @@ import pytest import torch -from isaaclab.test.mock_interfaces.sensors import MockImu, MockContactSensor, MockFrameTransformer from isaaclab.test.mock_interfaces.sensors import ( - create_mock_imu, - create_mock_contact_sensor, + MockContactSensor, + MockFrameTransformer, + MockImu, create_mock_foot_contact_sensor, create_mock_frame_transformer, + create_mock_imu, ) from isaaclab.test.mock_interfaces.utils import MockSensorBuilder - # ============================================================================== # MockImu Tests # ============================================================================== diff --git a/source/isaaclab_physx/isaaclab_physx/test/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/__init__.py index 684458ea7aa..a8484a8989a 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py index cffa253263c..d743be4fdf5 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py index 736171c9752..e99604d4175 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py index 4c52eb2e2ad..9abf21654b1 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py index 8b05ba96c51..d9858c11744 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py index 41824b76e12..3d787e74b2a 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -8,8 +13,9 @@ from __future__ import annotations import functools +from collections.abc import Callable, Generator from contextlib import contextmanager -from typing import Any, Callable, Generator, TypeVar +from typing import Any, TypeVar from unittest.mock import patch F = TypeVar("F", bound=Callable[..., Any]) diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py index f7e7e478ce2..ae4fcc892b3 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py index 5ba316882d2..173cfa6a306 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -157,9 +162,7 @@ def get_link_transforms(self) -> torch.Tensor: Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. """ if self._link_transforms is None: - self._link_transforms = torch.zeros( - self._count, self._num_links, 7, device=self._device - ) + self._link_transforms = torch.zeros(self._count, self._num_links, 7, device=self._device) self._link_transforms[:, :, 6] = 1.0 # w=1 for identity quaternion return self._link_transforms.clone() @@ -170,9 +173,7 @@ def get_link_velocities(self) -> torch.Tensor: Tensor of shape (N, L, 6) with [lin_vel(3), ang_vel(3)] per link. """ if self._link_velocities is None: - self._link_velocities = torch.zeros( - self._count, self._num_links, 6, device=self._device - ) + self._link_velocities = torch.zeros(self._count, self._num_links, 6, device=self._device) return self._link_velocities.clone() # -- DOF Getters -- @@ -204,9 +205,7 @@ def get_dof_projected_joint_forces(self) -> torch.Tensor: Tensor of shape (N, J) with projected joint forces. 
""" if self._dof_projected_joint_forces is None: - self._dof_projected_joint_forces = torch.zeros( - self._count, self._num_dofs, device=self._device - ) + self._dof_projected_joint_forces = torch.zeros(self._count, self._num_dofs, device=self._device) return self._dof_projected_joint_forces.clone() def get_dof_limits(self) -> torch.Tensor: @@ -250,9 +249,7 @@ def get_dof_max_forces(self) -> torch.Tensor: """ if self._dof_max_forces is None: # Default: infinite max force - self._dof_max_forces = torch.full( - (self._count, self._num_dofs), float("inf"), device=self._device - ) + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) return self._dof_max_forces.clone() def get_dof_max_velocities(self) -> torch.Tensor: @@ -263,9 +260,7 @@ def get_dof_max_velocities(self) -> torch.Tensor: """ if self._dof_max_velocities is None: # Default: infinite max velocity - self._dof_max_velocities = torch.full( - (self._count, self._num_dofs), float("inf"), device=self._device - ) + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) return self._dof_max_velocities.clone() def get_dof_armatures(self) -> torch.Tensor: @@ -285,9 +280,7 @@ def get_dof_friction_coefficients(self) -> torch.Tensor: Tensor of shape (N, J) with joint friction coefficients. """ if self._dof_friction_coefficients is None: - self._dof_friction_coefficients = torch.zeros( - self._count, self._num_dofs, device=self._device - ) + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device=self._device) return self._dof_friction_coefficients.clone() # -- Mass Property Getters -- @@ -321,9 +314,7 @@ def get_inertias(self) -> torch.Tensor: """ if self._inertias is None: # Default: identity inertia - self._inertias = torch.zeros( - self._count, self._num_links, 3, 3, device=self._device - ) + self._inertias = torch.zeros(self._count, self._num_links, 3, 3, device=self._device) self._inertias[:, :, 0, 0] = 1.0 self._inertias[:, :, 1, 1] = 1.0 self._inertias[:, :, 2, 2] = 1.0 @@ -521,9 +512,7 @@ def set_dof_max_forces( """ max_forces = max_forces.to(self._device) if self._dof_max_forces is None: - self._dof_max_forces = torch.full( - (self._count, self._num_dofs), float("inf"), device=self._device - ) + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) if indices is not None: self._dof_max_forces[indices] = max_forces else: @@ -542,9 +531,7 @@ def set_dof_max_velocities( """ max_velocities = max_velocities.to(self._device) if self._dof_max_velocities is None: - self._dof_max_velocities = torch.full( - (self._count, self._num_dofs), float("inf"), device=self._device - ) + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) if indices is not None: self._dof_max_velocities[indices] = max_velocities else: @@ -582,9 +569,7 @@ def set_dof_friction_coefficients( """ friction_coefficients = friction_coefficients.to(self._device) if self._dof_friction_coefficients is None: - self._dof_friction_coefficients = torch.zeros( - self._count, self._num_dofs, device=self._device - ) + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device=self._device) if indices is not None: self._dof_friction_coefficients[indices] = friction_coefficients else: @@ -644,9 +629,7 @@ def set_inertias( """ inertias = inertias.to(self._device) if self._inertias is None: - self._inertias = torch.zeros( - self._count, self._num_links, 
3, 3, device=self._device - ) + self._inertias = torch.zeros(self._count, self._num_links, 3, 3, device=self._device) self._inertias[:, :, 0, 0] = 1.0 self._inertias[:, :, 1, 1] = 1.0 self._inertias[:, :, 2, 2] = 1.0 diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py index ceb332ad936..98e86fa51e6 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py index 3aa00c5c9fd..0235700b3e9 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -110,9 +115,7 @@ def get_contact_force_matrix(self, dt: float) -> torch.Tensor: Tensor of shape (N*B, F, 3) with forces per body per filter. 
""" if self._contact_force_matrix is None: - self._contact_force_matrix = torch.zeros( - self._total_bodies, self._filter_count, 3, device=self._device - ) + self._contact_force_matrix = torch.zeros(self._total_bodies, self._filter_count, 3, device=self._device) return self._contact_force_matrix.clone() def get_contact_data( @@ -135,25 +138,15 @@ def get_contact_data( max_contacts = self._max_contact_data_count if self._contact_positions is None: - self._contact_positions = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._contact_positions = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._contact_normals is None: - self._contact_normals = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._contact_normals = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._contact_impulses is None: - self._contact_impulses = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._contact_impulses = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._contact_separations is None: - self._contact_separations = torch.zeros( - self._total_bodies, max_contacts, device=self._device - ) + self._contact_separations = torch.zeros(self._total_bodies, max_contacts, device=self._device) if self._contact_num_found is None: - self._contact_num_found = torch.zeros( - self._total_bodies, dtype=torch.int32, device=self._device - ) + self._contact_num_found = torch.zeros(self._total_bodies, dtype=torch.int32, device=self._device) if self._contact_patch_id is None: self._contact_patch_id = torch.zeros( self._total_bodies, max_contacts, dtype=torch.int32, device=self._device @@ -168,9 +161,7 @@ def get_contact_data( self._contact_patch_id.clone(), ) - def get_friction_data( - self, dt: float - ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + def get_friction_data(self, dt: float) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Get friction data. Args: @@ -186,17 +177,11 @@ def get_friction_data( max_contacts = self._max_contact_data_count if self._friction_forces is None: - self._friction_forces = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._friction_forces = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._friction_impulses is None: - self._friction_impulses = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._friction_impulses = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._friction_points is None: - self._friction_points = torch.zeros( - self._total_bodies, max_contacts, 3, device=self._device - ) + self._friction_points = torch.zeros(self._total_bodies, max_contacts, 3, device=self._device) if self._friction_patch_id is None: self._friction_patch_id = torch.zeros( self._total_bodies, max_contacts, dtype=torch.int32, device=self._device diff --git a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py index cc0c3e20b15..f8b9c5fcd4e 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py index c852133c279..4ed69a36513 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -7,13 +12,12 @@ import pytest import torch - -from isaaclab_physx.test.mock_interfaces.views import MockArticulationView from isaaclab_physx.test.mock_interfaces.factories import ( create_mock_articulation_view, create_mock_humanoid_view, create_mock_quadruped_view, ) +from isaaclab_physx.test.mock_interfaces.views import MockArticulationView class TestMockArticulationViewInit: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py index bf2f3e2e272..4c2a210617a 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -7,9 +12,8 @@ import pytest import torch - -from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyView from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_body_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidBodyView class TestMockRigidBodyViewInit: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py index 91c9dee6d03..8aa4d5e6ce7 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. 
# @@ -7,9 +12,8 @@ import pytest import torch - -from isaaclab_physx.test.mock_interfaces.views import MockRigidContactView from isaaclab_physx.test.mock_interfaces.factories import create_mock_rigid_contact_view +from isaaclab_physx.test.mock_interfaces.views import MockRigidContactView class TestMockRigidContactViewInit: @@ -181,9 +185,7 @@ def test_factory_basic(self): def test_factory_custom_max_contacts(self): """Test factory with custom max contact data count.""" - view = create_mock_rigid_contact_view( - count=2, num_bodies=3, max_contact_data_count=32 - ) + view = create_mock_rigid_contact_view(count=2, num_bodies=3, max_contact_data_count=32) _, _, _, separations, _, _ = view.get_contact_data(0.01) assert separations.shape[1] == 32 diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py index 37680cb9f07..b475c1ae8cc 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py @@ -1,3 +1,8 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + # Copyright (c) 2022-2025, The Isaac Lab Project Developers. # All rights reserved. # @@ -5,13 +10,6 @@ """Tests for patching utilities.""" -import torch - -from isaaclab_physx.test.mock_interfaces.views import ( - MockArticulationView, - MockRigidBodyView, - MockRigidContactView, -) from isaaclab_physx.test.mock_interfaces.utils import ( mock_articulation_view, mock_rigid_body_view, From 618af8ae2ae2c0e4541d962f27f9f6aa01adba31 Mon Sep 17 00:00:00 2001 From: Kelly Guo Date: Thu, 29 Jan 2026 11:04:20 -0800 Subject: [PATCH 26/38] Update tendon property references to fixed values Signed-off-by: Kelly Guo --- source/isaaclab/isaaclab/envs/mdp/events.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index bdfc1d03614..3112924c677 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -933,7 +933,7 @@ def __call__( # stiffness if stiffness_distribution_params is not None: stiffness = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_stiffness.clone(), + self.asset.data.fixed_tendon_stiffness.clone(), stiffness_distribution_params, env_ids, tendon_ids, @@ -945,7 +945,7 @@ def __call__( # damping if damping_distribution_params is not None: damping = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_damping.clone(), + self.asset.data.fixed_tendon_damping.clone(), damping_distribution_params, env_ids, tendon_ids, @@ -957,7 +957,7 @@ def __call__( # limit stiffness if limit_stiffness_distribution_params is not None: limit_stiffness = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_limit_stiffness.clone(), + self.asset.data.fixed_tendon_limit_stiffness.clone(), limit_stiffness_distribution_params, env_ids, tendon_ids, @@ -970,7 +970,7 @@ def __call__( # position limits if lower_limit_distribution_params is not None or upper_limit_distribution_params is not None: - limit = self.asset.data.default_fixed_tendon_pos_limits.clone() + limit = self.asset.data.fixed_tendon_pos_limits.clone() # -- lower limit if lower_limit_distribution_params is not None: limit[..., 0] = _randomize_prop_by_op( @@ -1004,7 +1004,7 @@ def __call__( # 
rest length if rest_length_distribution_params is not None: rest_length = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_rest_length.clone(), + self.asset.data.fixed_tendon_rest_length.clone(), rest_length_distribution_params, env_ids, tendon_ids, @@ -1016,7 +1016,7 @@ def __call__( # offset if offset_distribution_params is not None: offset = _randomize_prop_by_op( - self.asset.data.default_fixed_tendon_offset.clone(), + self.asset.data.fixed_tendon_offset.clone(), offset_distribution_params, env_ids, tendon_ids, From 730d513a3a0eebd6717dabba8c5bf28cc6e3bc99 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 11:00:03 +0100 Subject: [PATCH 27/38] Improvements around the mocking. Some QOL to make patching less tedious in full kitless mode. --- docs/source/testing/index.rst | 11 + docs/source/testing/mock_interfaces.rst | 470 ++++++++++++++++++ .../articulation/base_articulation_data.py | 2 +- .../rigid_object/base_rigid_object_data.py | 2 +- .../test/mock_interfaces/utils/__init__.py | 1 + .../utils/mock_wrench_composer.py | 171 +++++++ .../assets/articulation/articulation.py | 123 ++++- .../assets/articulation/articulation_data.py | 40 +- .../assets/rigid_object/rigid_object.py | 37 +- .../assets/rigid_object/rigid_object_data.py | 48 +- .../rigid_object_collection.py | 99 +++- .../rigid_object_collection_data.py | 98 ++-- .../views/mock_articulation_view.py | 269 +++++++--- .../views/mock_rigid_body_view.py | 115 +++-- 14 files changed, 1277 insertions(+), 209 deletions(-) create mode 100644 docs/source/testing/index.rst create mode 100644 docs/source/testing/mock_interfaces.rst create mode 100644 source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py diff --git a/docs/source/testing/index.rst b/docs/source/testing/index.rst new file mode 100644 index 00000000000..91b08ac948d --- /dev/null +++ b/docs/source/testing/index.rst @@ -0,0 +1,11 @@ +.. _testing: + +Testing +======= + +This section covers testing utilities and patterns for Isaac Lab development. + +.. toctree:: + :maxdepth: 2 + + mock_interfaces diff --git a/docs/source/testing/mock_interfaces.rst b/docs/source/testing/mock_interfaces.rst new file mode 100644 index 00000000000..63c9a2477db --- /dev/null +++ b/docs/source/testing/mock_interfaces.rst @@ -0,0 +1,470 @@ +.. _testing_mock_interfaces: + +Mock Interfaces for Unit Testing +================================ + +Isaac Lab provides mock implementations of sensor, asset, and PhysX view classes +for unit testing without requiring Isaac Sim or GPU simulation. + +Overview +-------- + +Two levels of mock interfaces are available: + +1. **High-level mocks** (``isaaclab.test.mock_interfaces``) - Mock sensors and assets +2. **Low-level PhysX mocks** (``isaaclab_physx.test.mock_interfaces``) - Mock PhysX TensorAPI views + +High-Level Mock Interfaces +-------------------------- + +Located in ``isaaclab.test.mock_interfaces``, these mock the public API of sensor +and asset base classes. + +Available Mocks +~~~~~~~~~~~~~~~ + +**Sensors:** + +.. list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - Mock Class + - Real Class + - Description + * - ``MockImu`` + - ``BaseImu`` + - IMU sensor (accelerometer + gyroscope) + * - ``MockContactSensor`` + - ``BaseContactSensor`` + - Contact force sensor + * - ``MockFrameTransformer`` + - ``BaseFrameTransformer`` + - Frame tracking sensor + +**Assets:** + +.. 
list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - Mock Class + - Real Class + - Description + * - ``MockArticulation`` + - ``BaseArticulation`` + - Articulated robot (joints + bodies) + * - ``MockRigidObject`` + - ``BaseRigidObject`` + - Single rigid body + * - ``MockRigidObjectCollection`` + - ``BaseRigidObjectCollection`` + - Collection of rigid bodies + +Quick Start +~~~~~~~~~~~ + +.. code-block:: python + + import torch + from isaaclab.test.mock_interfaces import MockArticulation, MockContactSensor + + # Create a mock quadruped robot + robot = MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + joint_names=[f"joint_{i}" for i in range(12)], + device="cpu" + ) + + # Set joint states + robot.data.set_mock_data( + joint_pos=torch.zeros(4, 12), + joint_vel=torch.randn(4, 12) * 0.1 + ) + + # Access data like the real asset + positions = robot.data.joint_pos # Shape: (4, 12) + + # Create a mock contact sensor + sensor = MockContactSensor(num_instances=4, num_bodies=4, device="cpu") + sensor.data.set_net_forces_w(torch.randn(4, 4, 3)) + +Factory Functions +~~~~~~~~~~~~~~~~~ + +Pre-configured factories for common use cases: + +.. code-block:: python + + from isaaclab.test.mock_interfaces import ( + create_mock_quadruped, + create_mock_humanoid, + create_mock_foot_contact_sensor, + ) + + # Pre-configured quadruped (12 joints, 13 bodies) + robot = create_mock_quadruped(num_instances=4) + + # Pre-configured humanoid (21 joints, 22 bodies) + humanoid = create_mock_humanoid(num_instances=2) + + # Foot contact sensor for quadruped + foot_contact = create_mock_foot_contact_sensor(num_instances=4, num_feet=4) + +Patching Utilities +~~~~~~~~~~~~~~~~~~ + +Use context managers or decorators to inject mocks into tests: + +**Context Managers:** + +.. code-block:: python + + from isaaclab.test.mock_interfaces.utils import patch_articulation, patch_sensor + + with patch_articulation("my_module.Articulation", num_joints=12) as MockRobot: + robot = my_function_that_creates_robot() + robot.data.set_joint_pos(torch.zeros(1, 12)) + + with patch_sensor("my_module.ContactSensor", "contact", num_bodies=4): + sensor = my_function_that_creates_sensor() + +**Decorators:** + +.. code-block:: python + + from isaaclab.test.mock_interfaces.utils import mock_articulation, mock_sensor + + @mock_articulation(num_joints=12, num_bodies=13) + def test_observation_function(mock_robot): + mock_robot.data.set_joint_pos(torch.zeros(1, 12)) + obs = compute_observation(mock_robot) + assert obs.shape == (1, 24) + + @mock_sensor("contact", num_instances=4, num_bodies=4) + def test_contact_reward(mock_contact): + mock_contact.data.set_net_forces_w(torch.randn(4, 4, 3)) + reward = compute_contact_reward(mock_contact) + +Low-Level PhysX Mock Interfaces +------------------------------- + +Located in ``isaaclab_physx.test.mock_interfaces``, these mock the PhysX TensorAPI +views used internally by assets and sensors. + +Available Mocks +~~~~~~~~~~~~~~~ + +.. list-table:: + :header-rows: 1 + :widths: 25 25 50 + + * - Mock Class + - PhysX Class + - Used By + * - ``MockRigidBodyView`` + - ``physx.RigidBodyView`` + - ``RigidObject`` + * - ``MockArticulationView`` + - ``physx.ArticulationView`` + - ``Articulation`` + * - ``MockRigidContactView`` + - ``physx.RigidContactView`` + - ``ContactSensor`` + +Quick Start +~~~~~~~~~~~ + +.. 
code-block:: python + + from isaaclab_physx.test.mock_interfaces import ( + MockRigidBodyView, + MockArticulationView, + MockRigidContactView, + ) + + # Create a mock rigid body view + view = MockRigidBodyView(count=4, device="cpu") + transforms = view.get_transforms() # Shape: (4, 7) + + # Set mock data + view.set_mock_transforms(torch.randn(4, 7)) + + # Create a mock articulation view + art_view = MockArticulationView( + count=4, + num_dofs=12, + num_links=13, + device="cpu" + ) + positions = art_view.get_dof_positions() # Shape: (4, 12) + +Factory Functions +~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from isaaclab_physx.test.mock_interfaces import ( + create_mock_rigid_body_view, + create_mock_articulation_view, + create_mock_quadruped_view, + create_mock_rigid_contact_view, + ) + + # Pre-configured quadruped view (12 DOFs, 13 links) + view = create_mock_quadruped_view(count=4) + + # Pre-configured humanoid view (21 DOFs, 22 links) + humanoid = create_mock_humanoid_view(count=2) + +Patching Utilities +~~~~~~~~~~~~~~~~~~ + +.. code-block:: python + + from isaaclab_physx.test.mock_interfaces.utils import ( + patch_rigid_body_view, + patch_articulation_view, + mock_articulation_view, + ) + + # Context manager + with patch_rigid_body_view("my_module.physx.RigidBodyView", count=4): + view = create_rigid_body() + + # Decorator + @mock_articulation_view(count=4, num_dofs=12, num_links=13) + def test_my_function(mock_view): + positions = mock_view.get_dof_positions() + assert positions.shape == (4, 12) + +Data Shapes Reference +--------------------- + +High-Level Mocks +~~~~~~~~~~~~~~~~ + +**IMU Data:** + +.. list-table:: + :header-rows: 1 + :widths: 25 20 55 + + * - Property + - Shape + - Description + * - ``pos_w`` + - ``(N, 3)`` + - Position in world frame + * - ``quat_w`` + - ``(N, 4)`` + - Orientation (w,x,y,z) in world frame + * - ``lin_vel_b`` + - ``(N, 3)`` + - Linear velocity in body frame + * - ``ang_vel_b`` + - ``(N, 3)`` + - Angular velocity in body frame + * - ``projected_gravity_b`` + - ``(N, 3)`` + - Gravity in body frame + +**Contact Sensor Data:** + +.. list-table:: + :header-rows: 1 + :widths: 25 20 55 + + * - Property + - Shape + - Description + * - ``net_forces_w`` + - ``(N, B, 3)`` + - Net contact forces + * - ``force_matrix_w`` + - ``(N, B, M, 3)`` + - Filtered forces + * - ``current_contact_time`` + - ``(N, B)`` + - Time in contact + * - ``current_air_time`` + - ``(N, B)`` + - Time in air + +Where N = instances, B = bodies, M = filter bodies + +**Articulation Data:** + +.. list-table:: + :header-rows: 1 + :widths: 25 20 55 + + * - Property + - Shape + - Description + * - ``joint_pos`` + - ``(N, J)`` + - Joint positions + * - ``joint_vel`` + - ``(N, J)`` + - Joint velocities + * - ``root_link_pose_w`` + - ``(N, 7)`` + - Root pose in world + * - ``body_link_pose_w`` + - ``(N, B, 7)`` + - Body poses in world + +Where N = instances, J = joints, B = bodies + +PhysX View Mocks +~~~~~~~~~~~~~~~~ + +**RigidBodyView:** + +.. list-table:: + :header-rows: 1 + :widths: 30 20 50 + + * - Method + - Shape + - Description + * - ``get_transforms()`` + - ``(N, 7)`` + - [pos(3), quat_xyzw(4)] + * - ``get_velocities()`` + - ``(N, 6)`` + - [lin_vel(3), ang_vel(3)] + * - ``get_masses()`` + - ``(N, 1, 1)`` + - mass per body + * - ``get_inertias()`` + - ``(N, 1, 3, 3)`` + - 3x3 inertia matrix + +**ArticulationView:** + +.. 
list-table:: + :header-rows: 1 + :widths: 35 20 45 + + * - Method + - Shape + - Description + * - ``get_root_transforms()`` + - ``(N, 7)`` + - Root pose + * - ``get_link_transforms()`` + - ``(N, L, 7)`` + - Link poses + * - ``get_dof_positions()`` + - ``(N, J)`` + - Joint positions + * - ``get_dof_limits()`` + - ``(N, J, 2)`` + - [lower, upper] + +**RigidContactView:** + +.. list-table:: + :header-rows: 1 + :widths: 40 20 40 + + * - Method + - Shape + - Description + * - ``get_net_contact_forces(dt)`` + - ``(N*B, 3)`` + - Flat net forces + * - ``get_contact_force_matrix(dt)`` + - ``(N*B, F, 3)`` + - Per-filter forces + +Design Patterns +--------------- + +All mock interfaces follow these patterns: + +1. **Lazy Initialization** - Tensors created on first access with correct shapes +2. **Device Transfer** - All setters call ``.to(device)`` +3. **Identity Quaternion Defaults** - Quaternions default to identity +4. **Clone on Getters** - Return ``.clone()`` to prevent mutation +5. **No-op Actions** - Simulation operations (reset, update, write_to_sim) do nothing +6. **Mock Setters** - Direct ``set_mock_*`` methods for test setup + +Example: Testing an Observation Function +---------------------------------------- + +.. code-block:: python + + import pytest + import torch + from isaaclab.test.mock_interfaces import MockArticulation + + @pytest.fixture + def robot(): + return MockArticulation( + num_instances=4, + num_joints=12, + num_bodies=13, + device="cpu" + ) + + def test_joint_observation(robot): + # Setup + joint_pos = torch.randn(4, 12) + robot.data.set_joint_pos(joint_pos) + + # Test your observation function + obs = compute_joint_observation(robot) + + # Verify + assert obs.shape == (4, 24) # pos + vel + assert torch.allclose(obs[:, :12], joint_pos) + + def test_body_observation(robot): + # Setup + body_poses = torch.randn(4, 13, 7) + robot.data.set_body_link_pose_w(body_poses) + + # Test + obs = compute_body_observation(robot) + assert obs.shape == (4, 13, 7) + +Example: Testing with PhysX View Mocks +-------------------------------------- + +.. 
code-block:: python + + import pytest + import torch + from isaaclab_physx.test.mock_interfaces import create_mock_quadruped_view + from isaaclab_physx.test.mock_interfaces.utils import mock_articulation_view + + def test_quadruped_joint_limits(): + """Test quadruped joint limit handling.""" + view = create_mock_quadruped_view(count=4) + + # Set custom joint limits + limits = torch.zeros(4, 12, 2) + limits[:, :, 0] = -1.5 # lower limit + limits[:, :, 1] = 1.5 # upper limit + view.set_mock_dof_limits(limits) + + # Verify limits + result = view.get_dof_limits() + assert torch.allclose(result[:, :, 0], torch.full((4, 12), -1.5)) + assert torch.allclose(result[:, :, 1], torch.full((4, 12), 1.5)) + + @mock_articulation_view(count=4, num_dofs=12, num_links=13) + def test_articulation_with_decorator(mock_view): + """Test using the decorator pattern.""" + # Set some initial positions + positions = torch.randn(4, 12) + mock_view.set_mock_dof_positions(positions) + + # Verify + result = mock_view.get_dof_positions() + assert torch.allclose(result, positions) diff --git a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py index 5e306e1180f..c6da5c3dcea 100644 --- a/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py +++ b/source/isaaclab/isaaclab/assets/articulation/base_articulation_data.py @@ -436,7 +436,7 @@ def body_mass(self) -> torch.Tensor: @property @abstractmethod def body_inertia(self) -> torch.Tensor: - """Body inertia ``wp.mat33`` in the world frame. Shape is (num_instances, num_bodies, 3, 3).""" + """Body inertia ``wp.mat33`` in the world frame. Shape is (num_instances, num_bodies, 9).""" raise NotImplementedError @property diff --git a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py index 849e648de57..f3a15b778c7 100644 --- a/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py +++ b/source/isaaclab/isaaclab/assets/rigid_object/base_rigid_object_data.py @@ -264,7 +264,7 @@ def body_com_pose_b(self) -> torch.Tensor: @property @abstractmethod def body_mass(self) -> torch.Tensor: - """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1).""" raise NotImplementedError() @property diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py index b8cb3c38a10..474483b6098 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py @@ -11,4 +11,5 @@ """Utilities for creating and using mock interfaces.""" from .mock_generator import MockArticulationBuilder, MockSensorBuilder +from .mock_wrench_composer import MockWrenchComposer from .patching import patch_articulation, patch_sensor diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py new file mode 100644 index 00000000000..4532f694420 --- /dev/null +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_wrench_composer.py @@ -0,0 +1,171 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Mock WrenchComposer for testing and benchmarking. + +This module provides a mock implementation of the WrenchComposer class that can be used +in testing and benchmarking without requiring the full simulation environment. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import torch +import warp as wp + +if TYPE_CHECKING: + from isaaclab.assets import BaseArticulation, BaseRigidObject, BaseRigidObjectCollection + + +class MockWrenchComposer: + """Mock WrenchComposer for testing. + + This class provides a mock implementation of WrenchComposer that matches the real interface + but does not launch Warp kernels. It can be used for testing and benchmarking asset classes + without requiring the full simulation environment. + + The mock maintains simple buffers and sets the active flag when forces/torques are added, + but does not perform actual force composition computations. + """ + + def __init__(self, asset: BaseArticulation | BaseRigidObject | BaseRigidObjectCollection) -> None: + """Initialize the mock wrench composer. + + Args: + asset: Asset to use (Articulation, RigidObject, or RigidObjectCollection). + """ + self.num_envs = asset.num_instances + if hasattr(asset, "num_bodies"): + self.num_bodies = asset.num_bodies + else: + raise ValueError(f"Unsupported asset type: {asset.__class__.__name__}") + self.device = asset.device + self._asset = asset + self._active = False + + # Create buffers using Warp (matching real WrenchComposer) + self._composed_force_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + self._composed_torque_b = wp.zeros((self.num_envs, self.num_bodies), dtype=wp.vec3f, device=self.device) + + # Create torch views (matching real WrenchComposer) + self._composed_force_b_torch = wp.to_torch(self._composed_force_b) + self._composed_torque_b_torch = wp.to_torch(self._composed_torque_b) + + # Create index arrays + self._ALL_ENV_INDICES_WP = wp.from_torch( + torch.arange(self.num_envs, dtype=torch.int32, device=self.device), dtype=wp.int32 + ) + self._ALL_BODY_INDICES_WP = wp.from_torch( + torch.arange(self.num_bodies, dtype=torch.int32, device=self.device), dtype=wp.int32 + ) + self._ALL_ENV_INDICES_TORCH = wp.to_torch(self._ALL_ENV_INDICES_WP) + self._ALL_BODY_INDICES_TORCH = wp.to_torch(self._ALL_BODY_INDICES_WP) + + @property + def active(self) -> bool: + """Whether the wrench composer is active.""" + return self._active + + @property + def composed_force(self) -> wp.array: + """Composed force at the body's link frame. + + Returns: + wp.array: Composed force at the body's link frame. (num_envs, num_bodies, 3) + """ + return self._composed_force_b + + @property + def composed_torque(self) -> wp.array: + """Composed torque at the body's link frame. + + Returns: + wp.array: Composed torque at the body's link frame. (num_envs, num_bodies, 3) + """ + return self._composed_torque_b + + @property + def composed_force_as_torch(self) -> torch.Tensor: + """Composed force at the body's link frame as torch tensor. + + Returns: + torch.Tensor: Composed force at the body's link frame. (num_envs, num_bodies, 3) + """ + return self._composed_force_b_torch + + @property + def composed_torque_as_torch(self) -> torch.Tensor: + """Composed torque at the body's link frame as torch tensor. + + Returns: + torch.Tensor: Composed torque at the body's link frame. 
(num_envs, num_bodies, 3) + """ + return self._composed_torque_b_torch + + def add_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Add forces and torques (mock - just sets active flag). + + Args: + forces: Forces. (num_envs, num_bodies, 3). Defaults to None. + torques: Torques. (num_envs, num_bodies, 3). Defaults to None. + positions: Positions. (num_envs, num_bodies, 3). Defaults to None. + body_ids: Body ids. Defaults to None (all bodies). + env_ids: Environment ids. Defaults to None (all environments). + is_global: Whether the forces and torques are applied in the global frame. Defaults to False. + """ + if forces is not None or torques is not None: + self._active = True + + def set_forces_and_torques( + self, + forces: wp.array | torch.Tensor | None = None, + torques: wp.array | torch.Tensor | None = None, + positions: wp.array | torch.Tensor | None = None, + body_ids: wp.array | torch.Tensor | None = None, + env_ids: wp.array | torch.Tensor | None = None, + is_global: bool = False, + ) -> None: + """Set forces and torques (mock - just sets active flag). + + Args: + forces: Forces. (num_envs, num_bodies, 3). Defaults to None. + torques: Torques. (num_envs, num_bodies, 3). Defaults to None. + positions: Positions. (num_envs, num_bodies, 3). Defaults to None. + body_ids: Body ids. Defaults to None (all bodies). + env_ids: Environment ids. Defaults to None (all environments). + is_global: Whether the forces and torques are applied in the global frame. Defaults to False. + """ + if forces is not None or torques is not None: + self._active = True + + def reset(self, env_ids: wp.array | torch.Tensor | None = None) -> None: + """Reset the composed force and torque. + + Args: + env_ids: Environment ids to reset. Defaults to None (all environments). 
+ """ + if env_ids is None: + self._composed_force_b.zero_() + self._composed_torque_b.zero_() + self._active = False + else: + # For partial reset, just zero the specified environments + if isinstance(env_ids, torch.Tensor): + indices = wp.from_torch(env_ids.to(torch.int32), dtype=wp.int32) + elif isinstance(env_ids, list): + indices = wp.array(env_ids, dtype=wp.int32, device=self.device) + else: + indices = env_ids + self._composed_force_b[indices].zero_() + self._composed_torque_b[indices].zero_() diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index ef4cbc38aa2..c310dcfd4c3 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -16,13 +16,12 @@ import warp as wp from prettytable import PrettyTable -import omni.physics.tensors.impl.api as physx from isaacsim.core.simulation_manager import SimulationManager from pxr import PhysxSchema, UsdPhysics -import isaaclab.sim as sim_utils +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims import isaaclab.utils.math as math_utils -import isaaclab.utils.string as string_utils +from isaaclab.utils.string import resolve_matching_names from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator from isaaclab.assets.articulation.base_articulation import BaseArticulation from isaaclab.utils.types import ArticulationActions @@ -33,6 +32,7 @@ if TYPE_CHECKING: from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + import omni.physics.tensors.impl.api as physx # import logger logger = logging.getLogger(__name__) @@ -277,7 +277,7 @@ def find_bodies(self, name_keys: str | Sequence[str], preserve_order: bool = Fal Returns: A tuple of lists containing the body indices and names. """ - return string_utils.resolve_matching_names(name_keys, self.body_names, preserve_order) + return resolve_matching_names(name_keys, self.body_names, preserve_order) def find_joints( self, name_keys: str | Sequence[str], joint_subset: list[str] | None = None, preserve_order: bool = False @@ -299,7 +299,7 @@ def find_joints( if joint_subset is None: joint_subset = self.joint_names # find joints - return string_utils.resolve_matching_names(name_keys, joint_subset, preserve_order) + return resolve_matching_names(name_keys, joint_subset, preserve_order) def find_fixed_tendons( self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False @@ -323,7 +323,7 @@ def find_fixed_tendons( # tendons follow the joint names they are attached to tendon_subsets = self.fixed_tendon_names # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) def find_spatial_tendons( self, name_keys: str | Sequence[str], tendon_subsets: list[str] | None = None, preserve_order: bool = False @@ -345,7 +345,7 @@ def find_spatial_tendons( if tendon_subsets is None: tendon_subsets = self.spatial_tendon_names # find tendons - return string_utils.resolve_matching_names(name_keys, tendon_subsets, preserve_order) + return resolve_matching_names(name_keys, tendon_subsets, preserve_order) """ Operations - State Writers. 
@@ -591,6 +591,9 @@ def write_joint_position_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -629,6 +632,9 @@ def write_joint_velocity_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -664,6 +670,11 @@ def write_joint_stiffness_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -693,6 +704,11 @@ def write_joint_damping_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -725,6 +741,11 @@ def write_joint_position_limit_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -785,6 +806,11 @@ def write_joint_velocity_limit_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -820,6 +846,11 @@ def write_joint_effort_limit_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and 
joint_ids != slice(None): env_ids = env_ids[:, None] @@ -854,6 +885,11 @@ def write_joint_armature_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -900,6 +936,11 @@ def write_joint_friction_coefficient_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -962,6 +1003,11 @@ def write_joint_dynamic_friction_coefficient_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -996,6 +1042,11 @@ def write_joint_viscous_friction_coefficient_to_sim( physx_env_ids = self._ALL_INDICES if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -1030,6 +1081,11 @@ def set_masses( physx_env_ids = self._ALL_INDICES if body_ids is None: body_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and body_ids != slice(None): env_ids = env_ids[:, None] @@ -1058,6 +1114,11 @@ def set_coms( physx_env_ids = self._ALL_INDICES if body_ids is None: body_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and body_ids != slice(None): env_ids = env_ids[:, None] @@ -1086,6 +1147,11 @@ def set_inertias( physx_env_ids = self._ALL_INDICES if body_ids is None: body_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + 
env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and body_ids != slice(None): env_ids = env_ids[:, None] @@ -1203,6 +1269,9 @@ def set_joint_position_target( env_ids = slice(None) if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -1230,6 +1299,9 @@ def set_joint_velocity_target( env_ids = slice(None) if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -1257,6 +1329,9 @@ def set_joint_effort_target( env_ids = slice(None) if joint_ids is None: joint_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and joint_ids != slice(None): env_ids = env_ids[:, None] @@ -1289,6 +1364,9 @@ def set_fixed_tendon_stiffness( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set stiffness @@ -1315,6 +1393,9 @@ def set_fixed_tendon_damping( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set damping @@ -1342,6 +1423,9 @@ def set_fixed_tendon_limit_stiffness( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set limit_stiffness @@ -1368,6 +1452,9 @@ def set_fixed_tendon_position_limit( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set limit @@ -1395,6 +1482,9 @@ def set_fixed_tendon_rest_length( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set rest_length @@ 
-1421,6 +1511,9 @@ def set_fixed_tendon_offset( env_ids = slice(None) if fixed_tendon_ids is None: fixed_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and fixed_tendon_ids != slice(None): env_ids = env_ids[:, None] # set offset @@ -1477,6 +1570,9 @@ def set_spatial_tendon_stiffness( env_ids = slice(None) if spatial_tendon_ids is None: spatial_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and spatial_tendon_ids != slice(None): env_ids = env_ids[:, None] # set stiffness @@ -1506,6 +1602,9 @@ def set_spatial_tendon_damping( env_ids = slice(None) if spatial_tendon_ids is None: spatial_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and spatial_tendon_ids != slice(None): env_ids = env_ids[:, None] # set damping @@ -1534,6 +1633,9 @@ def set_spatial_tendon_limit_stiffness( env_ids = slice(None) if spatial_tendon_ids is None: spatial_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and spatial_tendon_ids != slice(None): env_ids = env_ids[:, None] # set limit stiffness @@ -1561,6 +1663,9 @@ def set_spatial_tendon_offset( env_ids = slice(None) if spatial_tendon_ids is None: spatial_tendon_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) if env_ids != slice(None) and spatial_tendon_ids != slice(None): env_ids = env_ids[:, None] # set offset @@ -1610,13 +1715,13 @@ def _initialize_impl(self): # No articulation root prim path was specified, so we need to search # for it. We search for this in the first environment and then # create a regex that matches all environments. - first_env_matching_prim = sim_utils.find_first_matching_prim(self.cfg.prim_path) + first_env_matching_prim = find_first_matching_prim(self.cfg.prim_path) if first_env_matching_prim is None: raise RuntimeError(f"Failed to find prim for expression: '{self.cfg.prim_path}'.") first_env_matching_prim_path = first_env_matching_prim.GetPath().pathString # Find all articulation root prims in the first environment. - first_env_root_prims = sim_utils.get_all_matching_child_prims( + first_env_root_prims = get_all_matching_child_prims( first_env_matching_prim_path, predicate=lambda prim: prim.HasAPI(UsdPhysics.ArticulationRootAPI), traverse_instance_prims=False, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index ff76c2adacc..db78c19e62c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -2,19 +2,23 @@ # All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations import logging import weakref +from typing import TYPE_CHECKING import torch -import omni.physics.tensors.impl.api as physx from isaacsim.core.simulation_manager import SimulationManager -import isaaclab.utils.math as math_utils +from isaaclab.utils.math import quat_apply, quat_apply_inverse, convert_quat, combine_frame_transforms, normalize from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData from isaaclab.utils.buffers import TimestampedBuffer +if TYPE_CHECKING: + from isaaclab.assets.articulation.articulation_view import ArticulationView + # import logger logger = logging.getLogger(__name__) @@ -40,7 +44,7 @@ class ArticulationData(BaseArticulationData): __backend_name__: str = "physx" """The name of the backend for the articulation data.""" - def __init__(self, root_view: physx.ArticulationView, device: str): + def __init__(self, root_view: ArticulationView, device: str): """Initializes the articulation data. Args: @@ -51,7 +55,7 @@ def __init__(self, root_view: physx.ArticulationView, device: str): # Set the root articulation view # note: this is stored as a weak reference to avoid circular references between the asset class # and the data container. This is important to avoid memory leaks. - self._root_view: physx.ArticulationView = weakref.proxy(root_view) + self._root_view: ArticulationView = weakref.proxy(root_view) # Set initial time stamp self._sim_timestamp = 0.0 @@ -62,7 +66,7 @@ def __init__(self, root_view: physx.ArticulationView, device: str): gravity = self._physics_sim_view.get_gravity() # Convert to direction vector gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) # Initialize constants self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_view.count, 1) @@ -473,7 +477,7 @@ def root_link_pose_w(self) -> torch.Tensor: if self._root_link_pose_w.timestamp < self._sim_timestamp: # read data from simulation pose = self._root_view.get_root_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + pose[:, 3:7] = convert_quat(pose[:, 3:7], to="wxyz") # set the buffer data and timestamp self._root_link_pose_w.data = pose self._root_link_pose_w.timestamp = self._sim_timestamp @@ -492,7 +496,7 @@ def root_link_vel_w(self) -> torch.Tensor: vel = self.root_com_vel_w.clone() # adjust linear velocity to link from center of mass vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + vel[:, 3:], quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 ) # set the buffer data and timestamp self._root_link_vel_w.data = vel @@ -509,7 +513,7 @@ def root_com_pose_w(self) -> torch.Tensor: """ if self._root_com_pose_w.timestamp < self._sim_timestamp: # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( + pos, quat = combine_frame_transforms( self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] ) # set the buffer data and timestamp @@ -598,7 +602,7 @@ def body_link_pose_w(self) -> torch.Tensor: self._physics_sim_view.update_articulations_kinematic() # read data from simulation poses = self._root_view.get_link_transforms().clone() - poses[..., 3:7] = math_utils.convert_quat(poses[..., 3:7], to="wxyz") + poses[..., 
3:7] = convert_quat(poses[..., 3:7], to="wxyz") # set the buffer data and timestamp self._body_link_pose_w.data = poses self._body_link_pose_w.timestamp = self._sim_timestamp @@ -618,7 +622,7 @@ def body_link_vel_w(self) -> torch.Tensor: velocities = self.body_com_vel_w.clone() # adjust linear velocity to link from center of mass velocities[..., :3] += torch.linalg.cross( - velocities[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 + velocities[..., 3:], quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 ) # set the buffer data and timestamp self._body_link_vel_w.data = velocities @@ -636,7 +640,7 @@ def body_com_pose_w(self) -> torch.Tensor: """ if self._body_com_pose_w.timestamp < self._sim_timestamp: # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( + pos, quat = combine_frame_transforms( self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b ) # set the buffer data and timestamp @@ -726,7 +730,7 @@ def body_com_pose_b(self) -> torch.Tensor: if self._body_com_pose_b.timestamp < self._sim_timestamp: # read data from simulation pose = self._root_view.get_coms().to(self.device) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") + pose[..., 3:7] = convert_quat(pose[..., 3:7], to="wxyz") # set the buffer data and timestamp self._body_com_pose_b.data = pose self._body_com_pose_b.timestamp = self._sim_timestamp @@ -793,7 +797,7 @@ def joint_acc(self) -> torch.Tensor: @property def projected_gravity_b(self): """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) + return quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) @property def heading_w(self): @@ -803,7 +807,7 @@ def heading_w(self): This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) + forward_w = quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) return torch.atan2(forward_w[:, 1], forward_w[:, 0]) @property @@ -813,7 +817,7 @@ def root_link_lin_vel_b(self) -> torch.Tensor: This quantity is the linear velocity of the articulation root's actor frame with respect to the its actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) @property def root_link_ang_vel_b(self) -> torch.Tensor: @@ -822,7 +826,7 @@ def root_link_ang_vel_b(self) -> torch.Tensor: This quantity is the angular velocity of the articulation root's actor frame with respect to the its actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) @property def root_com_lin_vel_b(self) -> torch.Tensor: @@ -831,7 +835,7 @@ def root_com_lin_vel_b(self) -> torch.Tensor: This quantity is the linear velocity of the articulation root's center of mass frame with respect to the its actor frame. 
""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) @property def root_com_ang_vel_b(self) -> torch.Tensor: @@ -840,7 +844,7 @@ def root_com_ang_vel_b(self) -> torch.Tensor: This quantity is the angular velocity of the articulation root's center of mass frame with respect to the its actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) """ Sliced properties. diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py index 8ed484a1914..9e6fcbe9176 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object.py @@ -430,15 +430,14 @@ def set_masses( if env_ids is None: env_ids = slice(None) physx_env_ids = self._ALL_INDICES - if body_ids is None: - body_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and body_ids != slice(None): - env_ids = env_ids[:, None] + # convert lists to tensors for proper indexing + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # set into internal buffers - self.data.body_mass[env_ids, body_ids] = masses + # _body_mass shape from view is (N, 1), masses input shape is (N, B) where B=1 for RigidObject + self.data._body_mass[env_ids] = masses.reshape(-1, 1) # set into simulation - self.root_view.set_masses(self.data.body_mass.cpu(), indices=physx_env_ids.cpu()) + self.root_view.set_masses(self.data._body_mass.cpu(), indices=physx_env_ids.cpu()) def set_coms( self, @@ -461,13 +460,19 @@ def set_coms( physx_env_ids = self._ALL_INDICES if body_ids is None: body_ids = slice(None) + # convert lists to tensors for proper indexing + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # broadcast env_ids if needed to allow double indexing if env_ids != slice(None) and body_ids != slice(None): env_ids = env_ids[:, None] # set into internal buffers - self.data.body_com_pose_b[env_ids, body_ids] = coms + # body_com_pose_b shape is (N, B, 7) where [:,:,:3] is position and [:,:,3:7] is quaternion + # coms input shape is (N, B, 3) - only setting the position part + self.data.body_com_pose_b[env_ids, body_ids, :3] = coms # set into simulation - self.root_view.set_coms(self.data.body_com_pose_b.cpu(), indices=physx_env_ids.cpu()) + # RigidBodyView expects (N, 7) but body_com_pose_b is (N, 1, 7), squeeze the body dim + self.root_view.set_coms(self.data.body_com_pose_b.squeeze(1).cpu(), indices=physx_env_ids.cpu()) def set_inertias( self, @@ -487,15 +492,15 @@ def set_inertias( if env_ids is None: env_ids = slice(None) physx_env_ids = self._ALL_INDICES - if body_ids is None: - body_ids = slice(None) - # broadcast env_ids if needed to allow double indexing - if env_ids != slice(None) and body_ids != slice(None): - env_ids = env_ids[:, None] + # convert lists to tensors for proper indexing + if isinstance(physx_env_ids, list): + physx_env_ids = torch.tensor(physx_env_ids, dtype=torch.long, device=self.device) # set into internal buffers - self.data.body_inertia[env_ids, body_ids] = inertias + # _body_inertia shape from view is (N, 9) - flattened 3x3 matrices + # 
inertias input shape is (N, B, 3, 3) where B=1 for RigidObject, flatten to (N, 9) + self.data._body_inertia[env_ids] = inertias.reshape(-1, 9) # set into simulation - self.root_view.set_inertias(self.data.body_inertia.cpu(), indices=physx_env_ids.cpu()) + self.root_view.set_inertias(self.data._body_inertia.cpu(), indices=physx_env_ids.cpu()) def set_external_force_and_torque( self, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index f5b104e5ae0..645631efe24 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -2,19 +2,29 @@ # All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations import logging import weakref +from typing import TYPE_CHECKING import torch -import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager -import isaaclab.utils.math as math_utils +from isaaclab.utils.math import ( + combine_frame_transforms, + convert_quat, + normalize, + quat_apply, + quat_apply_inverse, +) from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData -from isaaclab.sim.utils.stage import get_current_stage_id from isaaclab.utils.buffers import TimestampedBuffer +if TYPE_CHECKING: + from isaaclab.assets.rigid_object.rigid_object_view import RigidObjectView + # import logger logger = logging.getLogger(__name__) @@ -43,7 +53,7 @@ class RigidObjectData(BaseRigidObjectData): __backend_name__: str = "physx" """The name of the backend for the rigid object data.""" - def __init__(self, root_view: physx.RigidBodyView, device: str): + def __init__(self, root_view: RigidObjectView, device: str): """Initializes the rigid object data. Args: @@ -54,20 +64,18 @@ def __init__(self, root_view: physx.RigidBodyView, device: str): # Set the root rigid body view # note: this is stored as a weak reference to avoid circular references between the asset class # and the data container. This is important to avoid memory leaks. 
- self._root_view: physx.RigidBodyView = weakref.proxy(root_view) + self._root_view: RigidObjectView = weakref.proxy(root_view) # Set initial time stamp self._sim_timestamp = 0.0 self._is_primed = False # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() # Convert to direction vector gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) # Initialize constants self.GRAVITY_VEC_W = gravity_dir.repeat(self._root_view.count, 1) @@ -206,7 +214,7 @@ def root_link_pose_w(self) -> torch.Tensor: if self._root_link_pose_w.timestamp < self._sim_timestamp: # read data from simulation pose = self._root_view.get_transforms().clone() - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + pose[:, 3:7] = convert_quat(pose[:, 3:7], to="wxyz") # set the buffer data and timestamp self._root_link_pose_w.data = pose self._root_link_pose_w.timestamp = self._sim_timestamp @@ -225,7 +233,7 @@ def root_link_vel_w(self) -> torch.Tensor: vel = self.root_com_vel_w.clone() # adjust linear velocity to link from center of mass vel[:, :3] += torch.linalg.cross( - vel[:, 3:], math_utils.quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 + vel[:, 3:], quat_apply(self.root_link_quat_w, -self.body_com_pos_b[:, 0]), dim=-1 ) # set the buffer data and timestamp self._root_link_vel_w.data = vel @@ -242,7 +250,7 @@ def root_com_pose_w(self) -> torch.Tensor: """ if self._root_com_pose_w.timestamp < self._sim_timestamp: # apply local transform to center of mass frame - pos, quat = math_utils.combine_frame_transforms( + pos, quat = combine_frame_transforms( self.root_link_pos_w, self.root_link_quat_w, self.body_com_pos_b[:, 0], self.body_com_quat_b[:, 0] ) # set the buffer data and timestamp @@ -410,7 +418,7 @@ def body_com_pose_b(self) -> torch.Tensor: if self._body_com_pose_b.timestamp < self._sim_timestamp: # read data from simulation pose = self._root_view.get_coms().to(self.device) - pose[:, 3:7] = math_utils.convert_quat(pose[:, 3:7], to="wxyz") + pose[:, 3:7] = convert_quat(pose[:, 3:7], to="wxyz") # set the buffer data and timestamp self._body_com_pose_b.data = pose.view(-1, 1, 7) self._body_com_pose_b.timestamp = self._sim_timestamp @@ -424,7 +432,7 @@ def body_com_pose_b(self) -> torch.Tensor: @property def projected_gravity_b(self) -> torch.Tensor: """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) + return quat_apply_inverse(self.root_link_quat_w, self.GRAVITY_VEC_W) @property def heading_w(self) -> torch.Tensor: @@ -434,7 +442,7 @@ def heading_w(self) -> torch.Tensor: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. 
""" - forward_w = math_utils.quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) + forward_w = quat_apply(self.root_link_quat_w, self.FORWARD_VEC_B) return torch.atan2(forward_w[:, 1], forward_w[:, 0]) @property @@ -444,7 +452,7 @@ def root_link_lin_vel_b(self) -> torch.Tensor: This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_link_lin_vel_w) @property def root_link_ang_vel_b(self) -> torch.Tensor: @@ -453,7 +461,7 @@ def root_link_ang_vel_b(self) -> torch.Tensor: This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_link_ang_vel_w) @property def root_com_lin_vel_b(self) -> torch.Tensor: @@ -462,7 +470,7 @@ def root_com_lin_vel_b(self) -> torch.Tensor: This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_com_lin_vel_w) @property def root_com_ang_vel_b(self) -> torch.Tensor: @@ -471,7 +479,7 @@ def root_com_ang_vel_b(self) -> torch.Tensor: This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) + return quat_apply_inverse(self.root_link_quat_w, self.root_com_ang_vel_w) """ Sliced properties. diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py index 29cd8263f3a..b534559304f 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection.py @@ -331,6 +331,12 @@ def write_body_link_pose_to_sim( if body_ids is None: body_ids = self._ALL_BODY_INDICES + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + # note: we need to do this here since tensors are not set into simulation until step. # set into internal buffers self.data.body_link_pose_w[env_ids[:, None], body_ids] = body_poses.clone() @@ -384,6 +390,12 @@ def write_body_com_pose_to_sim( if body_ids is None: body_ids = self._ALL_BODY_INDICES + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + # set into internal buffers self.data.body_com_pose_w[env_ids[:, None], body_ids] = body_poses.clone() # update these buffers only if the user is using them. Otherwise this adds to overhead. 
@@ -448,6 +460,12 @@ def write_body_com_velocity_to_sim( if body_ids is None: body_ids = self._ALL_BODY_INDICES + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + # note: we need to do this here since tensors are not set into simulation until step. # set into internal buffers self.data.body_com_vel_w[env_ids[:, None], body_ids] = body_velocities.clone() @@ -489,6 +507,12 @@ def write_body_link_velocity_to_sim( if body_ids is None: body_ids = self._ALL_BODY_INDICES + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + # set into internal buffers self.data.body_link_vel_w[env_ids[:, None], body_ids] = body_velocities.clone() # update these buffers only if the user is using them. Otherwise this adds to overhead. @@ -520,10 +544,31 @@ def set_masses( """Set masses of all bodies. Args: - masses: Masses of all bodies. Shape is (num_instances, num_bodies). + masses: Masses of all bodies. Shape is (len(env_ids), len(body_ids)). body_ids: The body indices to set the masses for. Defaults to None (all bodies). env_ids: The environment indices to set the masses for. Defaults to None (all environments). """ + # resolve indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if body_ids is None: + body_ids = self._ALL_BODY_INDICES + + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + + # set into internal buffers + # _body_mass shape from view is (num_instances * num_bodies, 1) + # We need to update only the selected env_ids and body_ids + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids) + # masses input shape is (len(env_ids), len(body_ids)), flatten to match view + self.data._body_mass[view_ids] = masses.reshape(-1, 1) + + # set into simulation + self.root_view.set_masses(self.data._body_mass.cpu(), indices=view_ids.cpu()) def set_coms( self, @@ -534,11 +579,37 @@ def set_coms( """Set center of mass positions of all bodies. Args: - coms: Center of mass positions of all bodies. Shape is (num_instances, num_bodies, 3). + coms: Center of mass positions of all bodies. Shape is (len(env_ids), len(body_ids), 3). body_ids: The body indices to set the center of mass positions for. Defaults to None (all bodies). env_ids: The environment indices to set the center of mass positions for. Defaults to None (all environments). 
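[Editor's note] `set_masses` maps the selected `(env_ids, body_ids)` pairs onto the flat indexing of the underlying view, whose buffers have one row per body instance. The helper `_env_body_ids_to_view_ids` is only referenced in the diff, so the sketch below assumes one plausible layout (environment-major, `view_id = env_id * num_bodies + body_id`); the real mapping may differ.

```python
import torch


def env_body_ids_to_view_ids(env_ids: torch.Tensor, body_ids: torch.Tensor, num_bodies: int) -> torch.Tensor:
    """Hypothetical environment-major flattening of (env, body) index pairs."""
    return (env_ids[:, None] * num_bodies + body_ids).flatten()


num_envs, num_bodies = 3, 2
body_mass = torch.ones(num_envs * num_bodies, 1)   # flat buffer, one row per body instance

env_ids = torch.tensor([0, 2])
body_ids = torch.tensor([1])
view_ids = env_body_ids_to_view_ids(env_ids, body_ids, num_bodies)   # -> [1, 5]

masses = torch.tensor([[2.5], [4.0]])            # shape (len(env_ids), len(body_ids))
body_mass[view_ids] = masses.reshape(-1, 1)      # flatten to match the view layout
```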
""" + # resolve indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if body_ids is None: + body_ids = self._ALL_BODY_INDICES + + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + + # get view indices + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids) + + # get current com poses and update position part + # body_com_pose_b triggers lazy evaluation, so we work with the underlying buffer + current_poses = self.root_view.get_coms().to(self.device) + # coms input shape is (len(env_ids), len(body_ids), 3), flatten to (N, 3) + current_poses[view_ids, :3] = coms.reshape(-1, 3) + + # set into simulation + self.root_view.set_coms(current_poses.cpu(), indices=view_ids.cpu()) + + # invalidate the cached buffer + self.data._body_com_pose_b.timestamp = -1 def set_inertias( self, @@ -549,10 +620,32 @@ def set_inertias( """Set inertias of all bodies. Args: - inertias: Inertias of all bodies. Shape is (num_instances, num_bodies, 3, 3). + inertias: Inertias of all bodies. Shape is (len(env_ids), len(body_ids), 3, 3). body_ids: The body indices to set the inertias for. Defaults to None (all bodies). env_ids: The environment indices to set the inertias for. Defaults to None (all environments). """ + # resolve indices + if env_ids is None: + env_ids = self._ALL_ENV_INDICES + if body_ids is None: + body_ids = self._ALL_BODY_INDICES + + # convert lists to tensors for proper indexing + if isinstance(env_ids, list): + env_ids = torch.tensor(env_ids, dtype=torch.long, device=self.device) + if isinstance(body_ids, list): + body_ids = torch.tensor(body_ids, dtype=torch.long, device=self.device) + + # get view indices + view_ids = self._env_body_ids_to_view_ids(env_ids, body_ids) + + # set into internal buffers + # _body_inertia shape from view is (num_instances * num_bodies, 9) - flattened 3x3 matrices + # inertias input shape is (len(env_ids), len(body_ids), 3, 3), flatten to (N, 9) + self.data._body_inertia[view_ids] = inertias.reshape(-1, 9) + + # set into simulation + self.root_view.set_inertias(self.data._body_inertia.cpu(), indices=view_ids.cpu()) def set_external_force_and_torque( self, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index ffc8e8beb07..131dbb4a4c0 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -2,19 +2,23 @@ # All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause +from __future__ import annotations import logging import weakref +from typing import TYPE_CHECKING import torch -import omni.physics.tensors.impl.api as physx +from isaacsim.core.simulation_manager import SimulationManager -import isaaclab.utils.math as math_utils +from isaaclab.utils.math import normalize, convert_quat, quat_apply, quat_apply_inverse, combine_frame_transforms from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData -from isaaclab.sim.utils.stage import get_current_stage_id from isaaclab.utils.buffers import TimestampedBuffer +if TYPE_CHECKING: + from isaaclab.assets.rigid_object_collection.rigid_object_collection_view import RigidObjectCollectionView + # import logger logger = logging.getLogger(__name__) @@ -43,7 +47,7 @@ class RigidObjectCollectionData(BaseRigidObjectCollectionData): __backend_name__: str = "physx" """The name of the backend for the rigid object collection data.""" - def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str): + def __init__(self, root_view: RigidObjectCollectionView, num_bodies: int, device: str): """Initializes the rigid object data. Args: @@ -56,7 +60,7 @@ def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str) # Set the root rigid body view # note: this is stored as a weak reference to avoid circular references between the asset class # and the data container. This is important to avoid memory leaks. - self._root_view: physx.RigidBodyView = weakref.proxy(root_view) + self._root_view: RigidObjectCollectionView = weakref.proxy(root_view) self.num_instances = self._root_view.count // self.num_bodies # Set initial time stamp @@ -64,13 +68,11 @@ def __init__(self, root_view: physx.RigidBodyView, num_bodies: int, device: str) self._is_primed = False # Obtain global physics sim view - stage_id = get_current_stage_id() - physics_sim_view = physx.create_simulation_view("torch", stage_id) - physics_sim_view.set_subspace_roots("/") - gravity = physics_sim_view.get_gravity() + self._physics_sim_view = SimulationManager.get_physics_sim_view() + gravity = self._physics_sim_view.get_gravity() # Convert to direction vector gravity_dir = torch.tensor((gravity[0], gravity[1], gravity[2]), device=self.device) - gravity_dir = math_utils.normalize(gravity_dir.unsqueeze(0)).squeeze(0) + gravity_dir = normalize(gravity_dir.unsqueeze(0)).squeeze(0) # Initialize constants self.GRAVITY_VEC_W = gravity_dir.repeat(self.num_instances, self.num_bodies, 1) @@ -204,7 +206,7 @@ def default_body_state(self, value: torch.Tensor) -> None: @property def body_link_pose_w(self) -> torch.Tensor: - """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + """Body link pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_bodies, 7). This quantity is the pose of the actor frame of the rigid body relative to the world. The orientation is provided in (x, y, z, w) format. 
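[Editor's note] As the constructor comment notes, the data container keeps only a weak proxy to the root view so that the asset/data reference cycle does not keep either object alive. A minimal illustration of that behaviour, with a throwaway `View` class standing in for the real view:

```python
import weakref


class View:
    count = 4


view = View()
proxy = weakref.proxy(view)

print(proxy.count)   # attribute access is forwarded to the live object -> 4

del view             # once the real object is gone...
try:
    print(proxy.count)   # ...the proxy raises instead of keeping it alive
except ReferenceError:
    print("view has been garbage collected")
```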
@@ -212,7 +214,7 @@ def body_link_pose_w(self) -> torch.Tensor: if self._body_link_pose_w.timestamp < self._sim_timestamp: # read data from simulation pose = self._reshape_view_to_data(self._root_view.get_transforms().clone()) - pose[..., 3:7] = math_utils.convert_quat(pose[..., 3:7], to="wxyz") + pose[..., 3:7] = convert_quat(pose[..., 3:7], to="wxyz") # set the buffer data and timestamp self._body_link_pose_w.data = pose self._body_link_pose_w.timestamp = self._sim_timestamp @@ -221,7 +223,7 @@ def body_link_pose_w(self) -> torch.Tensor: @property def body_link_vel_w(self) -> torch.Tensor: - """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, 1, 6). + """Body link velocity ``[lin_vel, ang_vel]`` in simulation world frame. Shape is (num_instances, num_bodies, 6). This quantity contains the linear and angular velocities of the actor frame of the root rigid body relative to the world. @@ -231,7 +233,7 @@ def body_link_vel_w(self) -> torch.Tensor: velocity = self.body_com_vel_w.clone() # adjust linear velocity to link from center of mass velocity[..., :3] += torch.linalg.cross( - velocity[..., 3:], math_utils.quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 + velocity[..., 3:], quat_apply(self.body_link_quat_w, -self.body_com_pos_b), dim=-1 ) # set the buffer data and timestamp self._body_link_vel_w.data = velocity @@ -241,14 +243,14 @@ def body_link_vel_w(self) -> torch.Tensor: @property def body_com_pose_w(self) -> torch.Tensor: - """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, 1, 7). + """Body center of mass pose ``[pos, quat]`` in simulation world frame. Shape is (num_instances, num_bodies, 7). This quantity is the pose of the center of mass frame of the rigid body relative to the world. The orientation is provided in (x, y, z, w) format. """ if self._body_com_pose_w.timestamp < self._sim_timestamp: # adjust pose to center of mass - pos, quat = math_utils.combine_frame_transforms( + pos, quat = combine_frame_transforms( self.body_link_pos_w, self.body_link_quat_w, self.body_com_pos_b, self.body_com_quat_b ) # set the buffer data and timestamp @@ -260,7 +262,7 @@ def body_com_pose_w(self) -> torch.Tensor: @property def body_com_vel_w(self) -> torch.Tensor: """Body center of mass velocity ``[lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 6). + Shape is (num_instances, num_bodies, 6). This quantity contains the linear and angular velocities of the root rigid body's center of mass frame relative to the world. @@ -274,7 +276,7 @@ def body_com_vel_w(self) -> torch.Tensor: @property def body_state_w(self) -> torch.Tensor: """State of all bodies `[pos, quat, lin_vel, ang_vel]` in simulation world frame. - Shape is (num_instances, 1, 13). + Shape is (num_instances, num_bodies, 13). The position and orientation are of the rigid bodies' actor frame. Meanwhile, the linear and angular velocities are of the rigid bodies' center of mass frame. @@ -288,7 +290,7 @@ def body_state_w(self) -> torch.Tensor: @property def body_link_state_w(self) -> torch.Tensor: """State of all bodies ``[pos, quat, lin_vel, ang_vel]`` in simulation world frame. - Shape is (num_instances, 1, 13). + Shape is (num_instances, num_bodies, 13). The position, quaternion, and linear/angular velocity are of the body's link frame relative to the world. The orientation is provided in (x, y, z, w) format. 
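[Editor's note] `body_link_vel_w` converts the centre-of-mass velocity reported by the simulation into the link-frame velocity via the rigid-body relation v_link = v_com + omega x (p_link - p_com). A small self-contained check of that relation for a single body, using plain tensors with the offset already expressed in the world frame (the actual code rotates the body-frame COM offset with `quat_apply` first):

```python
import torch

# angular velocity of the body and COM offset expressed in the world frame
ang_vel_w = torch.tensor([0.0, 0.0, 1.0])        # spinning about +z
com_offset_w = torch.tensor([0.5, 0.0, 0.0])     # COM is 0.5 m in front of the link origin
lin_vel_com_w = torch.tensor([1.0, 0.0, 0.0])    # COM linear velocity

# shift the measurement point from the COM back to the link origin
lin_vel_link_w = lin_vel_com_w + torch.linalg.cross(ang_vel_w, -com_offset_w, dim=-1)
print(lin_vel_link_w)   # tensor([ 1.0000, -0.5000,  0.0000])
```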
@@ -317,7 +319,7 @@ def body_com_state_w(self) -> torch.Tensor: @property def body_com_acc_w(self) -> torch.Tensor: """Acceleration of all bodies ``[lin_acc, ang_acc]`` in the simulation world frame. - Shape is (num_instances, 1, 6). + Shape is (num_instances, num_bodies, 6). This quantity is the acceleration of the rigid bodies' center of mass frame relative to the world. """ @@ -329,7 +331,7 @@ def body_com_acc_w(self) -> torch.Tensor: @property def body_com_pose_b(self) -> torch.Tensor: """Center of mass pose ``[pos, quat]`` of all bodies in their respective body's link frames. - Shape is (num_instances, 1, 7). + Shape is (num_instances, num_bodies, 7). This quantity is the pose of the center of mass frame of the rigid body relative to the body's link frame. The orientation is provided in (x, y, z, w) format. @@ -337,7 +339,7 @@ def body_com_pose_b(self) -> torch.Tensor: if self._body_com_pose_b.timestamp < self._sim_timestamp: # obtain the coms poses = self._root_view.get_coms().to(self.device) - poses[:, 3:7] = math_utils.convert_quat(poses[:, 3:7], to="wxyz") + poses[:, 3:7] = convert_quat(poses[:, 3:7], to="wxyz") # read data from simulation self._body_com_pose_b.data = self._reshape_view_to_data(poses) self._body_com_pose_b.timestamp = self._sim_timestamp @@ -346,12 +348,12 @@ def body_com_pose_b(self) -> torch.Tensor: @property def body_mass(self) -> torch.Tensor: - """Mass of all bodies in the simulation world frame. Shape is (num_instances, 1, 1).""" + """Mass of all bodies in the simulation world frame. Shape is (num_instances, num_bodies).""" return self._reshape_view_to_data(self._body_mass.to(self.device)) @property def body_inertia(self) -> torch.Tensor: - """Inertia of all bodies in the simulation world frame. Shape is (num_instances, 1, 3, 3).""" + """Inertia of all bodies in the simulation world frame. Shape is (num_instances, num_bodies, 9).""" return self._reshape_view_to_data(self._body_inertia.to(self.device)) """ @@ -360,55 +362,55 @@ def body_inertia(self) -> torch.Tensor: @property def projected_gravity_b(self) -> torch.Tensor: - """Projection of the gravity direction on base frame. Shape is (num_instances, 3).""" - return math_utils.quat_apply_inverse(self.body_link_quat_w, self.GRAVITY_VEC_W) + """Projection of the gravity direction on base frame. Shape is (num_instances, num_bodies, 3).""" + return quat_apply_inverse(self.body_link_quat_w, self.GRAVITY_VEC_W) @property def heading_w(self) -> torch.Tensor: - """Yaw heading of the base frame (in radians). Shape is (num_instances,). + """Yaw heading of the base frame (in radians). Shape is (num_instances, num_bodies). .. note:: This quantity is computed by assuming that the forward-direction of the base frame is along x-direction, i.e. :math:`(1, 0, 0)`. """ - forward_w = math_utils.quat_apply(self.body_link_quat_w, self.FORWARD_VEC_B) + forward_w = quat_apply(self.body_link_quat_w, self.FORWARD_VEC_B) return torch.atan2(forward_w[..., 1], forward_w[..., 0]) @property def body_link_lin_vel_b(self) -> torch.Tensor: - """Root link linear velocity in base frame. Shape is (num_instances, 3). + """Root link linear velocity in base frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. 
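[Editor's note] `heading_w` rotates the body-frame forward vector (+x, wxyz quaternions) into the world frame and takes the yaw with `atan2`. The snippet below re-derives the same quantity from the rotation-matrix column directly, as a standalone check of the convention; it is not the library implementation.

```python
import torch


def heading_from_quat_wxyz(quat: torch.Tensor) -> torch.Tensor:
    """Yaw heading from a (w, x, y, z) quaternion by rotating the +x axis into the world frame."""
    w, x, y, z = quat.unbind(-1)
    # first column of the rotation matrix, i.e. R @ (1, 0, 0)
    fwd_x = 1.0 - 2.0 * (y * y + z * z)
    fwd_y = 2.0 * (x * y + w * z)
    return torch.atan2(fwd_y, fwd_x)


# 90 degree yaw: quaternion (cos 45, 0, 0, sin 45)
quat = torch.tensor([[0.7071068, 0.0, 0.0, 0.7071068]])
print(heading_from_quat_wxyz(quat))   # ~1.5708 rad
```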
""" - return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_link_lin_vel_w) + return quat_apply_inverse(self.body_link_quat_w, self.body_link_lin_vel_w) @property def body_link_ang_vel_b(self) -> torch.Tensor: - """Root link angular velocity in base world frame. Shape is (num_instances, 3). + """Root link angular velocity in base world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the actor frame of the root rigid body frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_link_ang_vel_w) + return quat_apply_inverse(self.body_link_quat_w, self.body_link_ang_vel_w) @property def body_com_lin_vel_b(self) -> torch.Tensor: - """Root center of mass linear velocity in base frame. Shape is (num_instances, 3). + """Root center of mass linear velocity in base frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_com_lin_vel_w) + return quat_apply_inverse(self.body_link_quat_w, self.body_com_lin_vel_w) @property def body_com_ang_vel_b(self) -> torch.Tensor: - """Root center of mass angular velocity in base world frame. Shape is (num_instances, 3). + """Root center of mass angular velocity in base world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the root rigid body's center of mass frame with respect to the rigid body's actor frame. """ - return math_utils.quat_apply_inverse(self.body_link_quat_w, self.body_com_ang_vel_w) + return quat_apply_inverse(self.body_link_quat_w, self.body_com_ang_vel_w) """ Sliced properties. @@ -416,7 +418,7 @@ def body_com_ang_vel_b(self) -> torch.Tensor: @property def body_link_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the position of the rigid bodies' actor frame relative to the world. """ @@ -424,7 +426,7 @@ def body_link_pos_w(self) -> torch.Tensor: @property def body_link_quat_w(self) -> torch.Tensor: - """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, 1, 4). + """Orientation (x, y, z, w) of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the rigid bodies' actor frame relative to the world. """ @@ -432,7 +434,7 @@ def body_link_quat_w(self) -> torch.Tensor: @property def body_link_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the rigid bodies' center of mass frame relative to the world. """ @@ -440,7 +442,7 @@ def body_link_lin_vel_w(self) -> torch.Tensor: @property def body_link_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the rigid bodies' center of mass frame relative to the world. 
""" @@ -448,7 +450,7 @@ def body_link_ang_vel_w(self) -> torch.Tensor: @property def body_com_pos_w(self) -> torch.Tensor: - """Positions of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Positions of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the position of the rigid bodies' actor frame. """ @@ -458,13 +460,13 @@ def body_com_pos_w(self) -> torch.Tensor: def body_com_quat_w(self) -> torch.Tensor: """Orientation (x, y, z, w) of the principle axis of inertia of all bodies in simulation world frame. - Shape is (num_instances, 1, 4). This quantity is the orientation of the rigid bodies' actor frame. + Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the rigid bodies' actor frame. """ return self.body_com_pose_w[..., 3:7] @property def body_com_lin_vel_w(self) -> torch.Tensor: - """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear velocity of the rigid bodies' center of mass frame. """ @@ -472,7 +474,7 @@ def body_com_lin_vel_w(self) -> torch.Tensor: @property def body_com_ang_vel_w(self) -> torch.Tensor: - """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular velocity of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular velocity of the rigid bodies' center of mass frame. """ @@ -480,7 +482,7 @@ def body_com_ang_vel_w(self) -> torch.Tensor: @property def body_com_lin_acc_w(self) -> torch.Tensor: - """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Linear acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the linear acceleration of the rigid bodies' center of mass frame. """ @@ -488,7 +490,7 @@ def body_com_lin_acc_w(self) -> torch.Tensor: @property def body_com_ang_acc_w(self) -> torch.Tensor: - """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, 1, 3). + """Angular acceleration of all bodies in simulation world frame. Shape is (num_instances, num_bodies, 3). This quantity is the angular acceleration of the rigid bodies' center of mass frame. """ @@ -497,7 +499,7 @@ def body_com_ang_acc_w(self) -> torch.Tensor: @property def body_com_pos_b(self) -> torch.Tensor: """Center of mass position of all of the bodies in their respective link frames. - Shape is (num_instances, 1, 3). + Shape is (num_instances, num_bodies, 3). This quantity is the center of mass location relative to its body'slink frame. """ @@ -506,7 +508,7 @@ def body_com_pos_b(self) -> torch.Tensor: @property def body_com_quat_b(self) -> torch.Tensor: """Orientation (x, y, z, w) of the principle axis of inertia of all of the bodies in their - respective link frames. Shape is (num_instances, 1, 4). + respective link frames. Shape is (num_instances, num_bodies, 4). This quantity is the orientation of the principles axes of inertia relative to its body's link frame. 
""" diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py index 173cfa6a306..604cb4966c2 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py @@ -35,7 +35,7 @@ class MockArticulationView: - dof_dampings: (N, J) - joint dampings - dof_max_forces: (N, J) - maximum joint forces - dof_max_velocities: (N, J) - maximum joint velocities - - masses: (N, L, 1) - per-link masses + - masses: (N, L) - per-link masses - coms: (N, L, 7) - per-link centers of mass - inertias: (N, L, 3, 3) - per-link inertia tensors @@ -89,6 +89,8 @@ def __init__( self._root_velocities: torch.Tensor | None = None self._link_transforms: torch.Tensor | None = None self._link_velocities: torch.Tensor | None = None + self._link_accelerations: torch.Tensor | None = None + self._link_incoming_joint_force: torch.Tensor | None = None self._dof_positions: torch.Tensor | None = None self._dof_velocities: torch.Tensor | None = None self._dof_projected_joint_forces: torch.Tensor | None = None @@ -99,10 +101,24 @@ def __init__( self._dof_max_velocities: torch.Tensor | None = None self._dof_armatures: torch.Tensor | None = None self._dof_friction_coefficients: torch.Tensor | None = None + self._dof_friction_properties: torch.Tensor | None = None self._masses: torch.Tensor | None = None self._coms: torch.Tensor | None = None self._inertias: torch.Tensor | None = None + # -- Helper Methods -- + + def _check_cpu_tensor(self, tensor: torch.Tensor, name: str) -> None: + """Check that tensor is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where joint/body properties must be on CPU. + """ + if tensor.is_cuda: + raise RuntimeError( + f"Expected CPU tensor for {name}, but got tensor on {tensor.device}. " + "Joint and body properties must be set with CPU tensors." + ) + # -- Properties -- @property @@ -212,11 +228,11 @@ def get_dof_limits(self) -> torch.Tensor: """Get position limits of all DOFs. Returns: - Tensor of shape (N, J, 2) with [lower, upper] limits. + Tensor of shape (N, J, 2) with [lower, upper] limits. Always on CPU. """ if self._dof_limits is None: - # Default: no limits (infinite) - self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device=self._device) + # Default: no limits (infinite) - stored on CPU + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device="cpu") self._dof_limits[:, :, 0] = float("-inf") # lower limit self._dof_limits[:, :, 1] = float("inf") # upper limit return self._dof_limits.clone() @@ -225,62 +241,62 @@ def get_dof_stiffnesses(self) -> torch.Tensor: """Get stiffnesses of all DOFs. Returns: - Tensor of shape (N, J) with joint stiffnesses. + Tensor of shape (N, J) with joint stiffnesses. Always on CPU. """ if self._dof_stiffnesses is None: - self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device="cpu") return self._dof_stiffnesses.clone() def get_dof_dampings(self) -> torch.Tensor: """Get dampings of all DOFs. Returns: - Tensor of shape (N, J) with joint dampings. + Tensor of shape (N, J) with joint dampings. Always on CPU. 
""" if self._dof_dampings is None: - self._dof_dampings = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device="cpu") return self._dof_dampings.clone() def get_dof_max_forces(self) -> torch.Tensor: """Get maximum forces of all DOFs. Returns: - Tensor of shape (N, J) with maximum joint forces. + Tensor of shape (N, J) with maximum joint forces. Always on CPU. """ if self._dof_max_forces is None: - # Default: infinite max force - self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) + # Default: infinite max force - stored on CPU + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") return self._dof_max_forces.clone() def get_dof_max_velocities(self) -> torch.Tensor: """Get maximum velocities of all DOFs. Returns: - Tensor of shape (N, J) with maximum joint velocities. + Tensor of shape (N, J) with maximum joint velocities. Always on CPU. """ if self._dof_max_velocities is None: - # Default: infinite max velocity - self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) + # Default: infinite max velocity - stored on CPU + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") return self._dof_max_velocities.clone() def get_dof_armatures(self) -> torch.Tensor: """Get armatures of all DOFs. Returns: - Tensor of shape (N, J) with joint armatures. + Tensor of shape (N, J) with joint armatures. Always on CPU. """ if self._dof_armatures is None: - self._dof_armatures = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device="cpu") return self._dof_armatures.clone() def get_dof_friction_coefficients(self) -> torch.Tensor: """Get friction coefficients of all DOFs. Returns: - Tensor of shape (N, J) with joint friction coefficients. + Tensor of shape (N, J) with joint friction coefficients. Always on CPU. """ if self._dof_friction_coefficients is None: - self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device="cpu") return self._dof_friction_coefficients.clone() # -- Mass Property Getters -- @@ -289,20 +305,20 @@ def get_masses(self) -> torch.Tensor: """Get masses of all links. Returns: - Tensor of shape (N, L, 1) with link masses. + Tensor of shape (N, L) with link masses. Always on CPU. """ if self._masses is None: - self._masses = torch.ones(self._count, self._num_links, 1, device=self._device) + self._masses = torch.ones(self._count, self._num_links, device="cpu") return self._masses.clone() def get_coms(self) -> torch.Tensor: """Get centers of mass of all links. Returns: - Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. + Tensor of shape (N, L, 7) with [pos(3), quat_xyzw(4)] per link. Always on CPU. """ if self._coms is None: - self._coms = torch.zeros(self._count, self._num_links, 7, device=self._device) + self._coms = torch.zeros(self._count, self._num_links, 7, device="cpu") self._coms[:, :, 6] = 1.0 # w=1 for identity quaternion return self._coms.clone() @@ -310,14 +326,14 @@ def get_inertias(self) -> torch.Tensor: """Get inertia tensors of all links. Returns: - Tensor of shape (N, L, 3, 3) with 3x3 inertia matrices per link. + Tensor of shape (N, L, 9) with flattened 3x3 inertia matrices per link (row-major). Always on CPU. 
""" if self._inertias is None: - # Default: identity inertia - self._inertias = torch.zeros(self._count, self._num_links, 3, 3, device=self._device) - self._inertias[:, :, 0, 0] = 1.0 - self._inertias[:, :, 1, 1] = 1.0 - self._inertias[:, :, 2, 2] = 1.0 + # Default: identity inertia - flattened [1,0,0,0,1,0,0,0,1] - stored on CPU + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = 1.0 # [0,0] + self._inertias[:, :, 4] = 1.0 # [1,1] + self._inertias[:, :, 8] = 1.0 # [2,2] return self._inertias.clone() # -- Root Setters -- @@ -448,12 +464,15 @@ def set_dof_limits( """Set position limits of all DOFs. Args: - limits: Tensor of shape (N, J, 2) with [lower, upper] limits. + limits: Tensor of shape (N, J, 2) with [lower, upper] limits. Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If limits tensor is on GPU. """ - limits = limits.to(self._device) + self._check_cpu_tensor(limits, "dof_limits") if self._dof_limits is None: - self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device=self._device) + self._dof_limits = torch.zeros(self._count, self._num_dofs, 2, device="cpu") self._dof_limits[:, :, 0] = float("-inf") self._dof_limits[:, :, 1] = float("inf") if indices is not None: @@ -469,12 +488,15 @@ def set_dof_stiffnesses( """Set stiffnesses of all DOFs. Args: - stiffnesses: Tensor of shape (N, J). + stiffnesses: Tensor of shape (N, J). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If stiffnesses tensor is on GPU. """ - stiffnesses = stiffnesses.to(self._device) + self._check_cpu_tensor(stiffnesses, "dof_stiffnesses") if self._dof_stiffnesses is None: - self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_stiffnesses = torch.zeros(self._count, self._num_dofs, device="cpu") if indices is not None: self._dof_stiffnesses[indices] = stiffnesses else: @@ -488,12 +510,15 @@ def set_dof_dampings( """Set dampings of all DOFs. Args: - dampings: Tensor of shape (N, J). + dampings: Tensor of shape (N, J). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If dampings tensor is on GPU. """ - dampings = dampings.to(self._device) + self._check_cpu_tensor(dampings, "dof_dampings") if self._dof_dampings is None: - self._dof_dampings = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_dampings = torch.zeros(self._count, self._num_dofs, device="cpu") if indices is not None: self._dof_dampings[indices] = dampings else: @@ -507,12 +532,15 @@ def set_dof_max_forces( """Set maximum forces of all DOFs. Args: - max_forces: Tensor of shape (N, J). + max_forces: Tensor of shape (N, J). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_forces tensor is on GPU. """ - max_forces = max_forces.to(self._device) + self._check_cpu_tensor(max_forces, "dof_max_forces") if self._dof_max_forces is None: - self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) + self._dof_max_forces = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") if indices is not None: self._dof_max_forces[indices] = max_forces else: @@ -526,12 +554,15 @@ def set_dof_max_velocities( """Set maximum velocities of all DOFs. Args: - max_velocities: Tensor of shape (N, J). + max_velocities: Tensor of shape (N, J). Must be on CPU. 
indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If max_velocities tensor is on GPU. """ - max_velocities = max_velocities.to(self._device) + self._check_cpu_tensor(max_velocities, "dof_max_velocities") if self._dof_max_velocities is None: - self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device=self._device) + self._dof_max_velocities = torch.full((self._count, self._num_dofs), float("inf"), device="cpu") if indices is not None: self._dof_max_velocities[indices] = max_velocities else: @@ -545,12 +576,15 @@ def set_dof_armatures( """Set armatures of all DOFs. Args: - armatures: Tensor of shape (N, J). + armatures: Tensor of shape (N, J). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If armatures tensor is on GPU. """ - armatures = armatures.to(self._device) + self._check_cpu_tensor(armatures, "dof_armatures") if self._dof_armatures is None: - self._dof_armatures = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_armatures = torch.zeros(self._count, self._num_dofs, device="cpu") if indices is not None: self._dof_armatures[indices] = armatures else: @@ -564,12 +598,15 @@ def set_dof_friction_coefficients( """Set friction coefficients of all DOFs. Args: - friction_coefficients: Tensor of shape (N, J). + friction_coefficients: Tensor of shape (N, J). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If friction_coefficients tensor is on GPU. """ - friction_coefficients = friction_coefficients.to(self._device) + self._check_cpu_tensor(friction_coefficients, "dof_friction_coefficients") if self._dof_friction_coefficients is None: - self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device=self._device) + self._dof_friction_coefficients = torch.zeros(self._count, self._num_dofs, device="cpu") if indices is not None: self._dof_friction_coefficients[indices] = friction_coefficients else: @@ -585,12 +622,15 @@ def set_masses( """Set masses of all links. Args: - masses: Tensor of shape (N, L, 1). + masses: Tensor of shape (N, L). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If masses tensor is on GPU. """ - masses = masses.to(self._device) + self._check_cpu_tensor(masses, "masses") if self._masses is None: - self._masses = torch.ones(self._count, self._num_links, 1, device=self._device) + self._masses = torch.ones(self._count, self._num_links, device="cpu") if indices is not None: self._masses[indices] = masses else: @@ -604,12 +644,15 @@ def set_coms( """Set centers of mass of all links. Args: - coms: Tensor of shape (N, L, 7). + coms: Tensor of shape (N, L, 7). Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If coms tensor is on GPU. """ - coms = coms.to(self._device) + self._check_cpu_tensor(coms, "coms") if self._coms is None: - self._coms = torch.zeros(self._count, self._num_links, 7, device=self._device) + self._coms = torch.zeros(self._count, self._num_links, 7, device="cpu") self._coms[:, :, 6] = 1.0 if indices is not None: self._coms[indices] = coms @@ -624,15 +667,18 @@ def set_inertias( """Set inertia tensors of all links. Args: - inertias: Tensor of shape (N, L, 3, 3). + inertias: Tensor of shape (N, L, 9) - flattened 3x3 matrices. Must be on CPU. indices: Optional indices of articulations to update. + + Raises: + RuntimeError: If inertias tensor is on GPU. 
""" - inertias = inertias.to(self._device) + self._check_cpu_tensor(inertias, "inertias") if self._inertias is None: - self._inertias = torch.zeros(self._count, self._num_links, 3, 3, device=self._device) - self._inertias[:, :, 0, 0] = 1.0 - self._inertias[:, :, 1, 1] = 1.0 - self._inertias[:, :, 2, 2] = 1.0 + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = 1.0 + self._inertias[:, :, 4] = 1.0 + self._inertias[:, :, 8] = 1.0 if indices is not None: self._inertias[indices] = inertias else: @@ -756,7 +802,7 @@ def set_mock_masses(self, masses: torch.Tensor) -> None: """Set mock mass data directly for testing. Args: - masses: Tensor of shape (N, L, 1). + masses: Tensor of shape (N, L). """ self._masses = masses.to(self._device) @@ -775,3 +821,102 @@ def set_mock_inertias(self, inertias: torch.Tensor) -> None: inertias: Tensor of shape (N, L, 3, 3). """ self._inertias = inertias.to(self._device) + + # -- Additional mock state for extended properties -- + + def get_dof_friction_properties(self) -> torch.Tensor: + """Get friction properties of all DOFs. + + Returns: + Tensor of shape (N, J, 3) with [static_friction, dynamic_friction, viscous_friction]. Always on CPU. + """ + if self._dof_friction_properties is None: + self._dof_friction_properties = torch.zeros(self._count, self._num_dofs, 3, device="cpu") + return self._dof_friction_properties.clone() + + def get_link_accelerations(self) -> torch.Tensor: + """Get accelerations of all links. + + Returns: + Tensor of shape (N, L, 6) with [lin_acc(3), ang_acc(3)] per link. + """ + if self._link_accelerations is None: + self._link_accelerations = torch.zeros(self._count, self._num_links, 6, device=self._device) + return self._link_accelerations.clone() + + def get_link_incoming_joint_force(self) -> torch.Tensor: + """Get incoming joint forces for all links. + + Returns: + Tensor of shape (N, L, 6) with [force(3), torque(3)] per link. + """ + if self._link_incoming_joint_force is None: + self._link_incoming_joint_force = torch.zeros(self._count, self._num_links, 6, device=self._device) + return self._link_incoming_joint_force.clone() + + def set_mock_dof_friction_properties(self, friction_properties: torch.Tensor) -> None: + """Set mock DOF friction properties data directly for testing. + + Args: + friction_properties: Tensor of shape (N, J, 3). + """ + self._dof_friction_properties = friction_properties.to(self._device) + + def set_mock_link_accelerations(self, accelerations: torch.Tensor) -> None: + """Set mock link acceleration data directly for testing. + + Args: + accelerations: Tensor of shape (N, L, 6). + """ + self._link_accelerations = accelerations.to(self._device) + + def set_mock_link_incoming_joint_force(self, forces: torch.Tensor) -> None: + """Set mock link incoming joint force data directly for testing. + + Args: + forces: Tensor of shape (N, L, 6). + """ + self._link_incoming_joint_force = forces.to(self._device) + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random values, + useful for benchmarking where the actual values don't matter. 
+ """ + # Root state + self._root_transforms = torch.randn(self._count, 7, device=self._device) + self._root_transforms[:, 3:7] = torch.nn.functional.normalize(self._root_transforms[:, 3:7], dim=-1) + self._root_velocities = torch.randn(self._count, 6, device=self._device) + + # Link state + self._link_transforms = torch.randn(self._count, self._num_links, 7, device=self._device) + self._link_transforms[:, :, 3:7] = torch.nn.functional.normalize(self._link_transforms[:, :, 3:7], dim=-1) + self._link_velocities = torch.randn(self._count, self._num_links, 6, device=self._device) + self._link_accelerations = torch.randn(self._count, self._num_links, 6, device=self._device) + self._link_incoming_joint_force = torch.randn(self._count, self._num_links, 6, device=self._device) + + # DOF state + self._dof_positions = torch.randn(self._count, self._num_dofs, device=self._device) + self._dof_velocities = torch.randn(self._count, self._num_dofs, device=self._device) + self._dof_projected_joint_forces = torch.randn(self._count, self._num_dofs, device=self._device) + + # DOF properties - stored on CPU (PhysX requirement) + self._dof_limits = torch.randn(self._count, self._num_dofs, 2, device="cpu") + self._dof_stiffnesses = torch.rand(self._count, self._num_dofs, device="cpu") * 100 + self._dof_dampings = torch.rand(self._count, self._num_dofs, device="cpu") * 10 + self._dof_max_forces = torch.rand(self._count, self._num_dofs, device="cpu") * 100 + self._dof_max_velocities = torch.rand(self._count, self._num_dofs, device="cpu") * 10 + self._dof_armatures = torch.rand(self._count, self._num_dofs, device="cpu") * 0.1 + self._dof_friction_coefficients = torch.rand(self._count, self._num_dofs, device="cpu") + self._dof_friction_properties = torch.rand(self._count, self._num_dofs, 3, device="cpu") + + # Mass properties - stored on CPU (PhysX requirement) + self._masses = torch.rand(self._count, self._num_links, device="cpu") * 10 + self._coms = torch.randn(self._count, self._num_links, 7, device="cpu") + self._coms[:, :, 3:7] = torch.nn.functional.normalize(self._coms[:, :, 3:7], dim=-1) + # Inertias: (N, L, 9) flattened format + self._inertias = torch.zeros(self._count, self._num_links, 9, device="cpu") + self._inertias[:, :, 0] = torch.rand(self._count, self._num_links) # [0,0] + self._inertias[:, :, 4] = torch.rand(self._count, self._num_links) # [1,1] + self._inertias[:, :, 8] = torch.rand(self._count, self._num_links) # [2,2] diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py index 98e86fa51e6..e34e2df8cf5 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py @@ -25,9 +25,9 @@ class MockRigidBodyView: - transforms: (N, 7) - [pos(3), quat_xyzw(4)] - velocities: (N, 6) - [lin_vel(3), ang_vel(3)] - accelerations: (N, 6) - [lin_acc(3), ang_acc(3)] - - masses: (N, 1, 1) - - coms: (N, 1, 7) - center of mass [pos(3), quat_xyzw(4)] - - inertias: (N, 1, 3, 3) - 3x3 inertia matrix + - masses: (N, 1) + - coms: (N, 7) - center of mass [pos(3), quat_xyzw(4)] + - inertias: (N, 9) - flattened 3x3 inertia matrix (row-major) """ def __init__( @@ -56,6 +56,19 @@ def __init__( self._coms: torch.Tensor | None = None self._inertias: torch.Tensor | None = None + # -- Helper Methods -- + + def _check_cpu_tensor(self, tensor: torch.Tensor, name: str) 
-> None: + """Check that tensor is on CPU, raise RuntimeError if on GPU. + + This mimics PhysX behavior where body properties must be on CPU. + """ + if tensor.is_cuda: + raise RuntimeError( + f"Expected CPU tensor for {name}, but got tensor on {tensor.device}. " + "Body properties must be set with CPU tensors." + ) + # -- Properties -- @property @@ -106,36 +119,36 @@ def get_masses(self) -> torch.Tensor: """Get masses of all rigid bodies. Returns: - Tensor of shape (N, 1, 1) with mass values. + Tensor of shape (N, 1) with mass values. Always on CPU. """ if self._masses is None: - self._masses = torch.ones(self._count, 1, 1, device=self._device) + self._masses = torch.ones(self._count, 1, device="cpu") return self._masses.clone() def get_coms(self) -> torch.Tensor: """Get centers of mass of all rigid bodies. Returns: - Tensor of shape (N, 1, 7) with [pos(3), quat_xyzw(4)]. + Tensor of shape (N, 7) with [pos(3), quat_xyzw(4)]. Always on CPU. """ if self._coms is None: - # Default: local origin with identity quaternion - self._coms = torch.zeros(self._count, 1, 7, device=self._device) - self._coms[:, :, 6] = 1.0 # w=1 for identity quaternion + # Default: local origin with identity quaternion - stored on CPU + self._coms = torch.zeros(self._count, 7, device="cpu") + self._coms[:, 6] = 1.0 # w=1 for identity quaternion return self._coms.clone() def get_inertias(self) -> torch.Tensor: """Get inertia tensors of all rigid bodies. Returns: - Tensor of shape (N, 1, 3, 3) with 3x3 inertia matrices. + Tensor of shape (N, 9) with flattened 3x3 inertia matrices (row-major). Always on CPU. """ if self._inertias is None: - # Default: identity inertia (unit sphere) - self._inertias = torch.zeros(self._count, 1, 3, 3, device=self._device) - self._inertias[:, :, 0, 0] = 1.0 - self._inertias[:, :, 1, 1] = 1.0 - self._inertias[:, :, 2, 2] = 1.0 + # Default: identity inertia (unit sphere) - flattened [1,0,0,0,1,0,0,0,1] - stored on CPU + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = 1.0 # [0,0] + self._inertias[:, 4] = 1.0 # [1,1] + self._inertias[:, 8] = 1.0 # [2,2] return self._inertias.clone() # -- Setters (simulation interface) -- @@ -187,12 +200,15 @@ def set_masses( """Set masses of rigid bodies. Args: - masses: Tensor of shape (N, 1, 1) or (len(indices), 1, 1). + masses: Tensor of shape (N, 1) or (len(indices), 1). Must be on CPU. indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If masses tensor is on GPU. """ - masses = masses.to(self._device) + self._check_cpu_tensor(masses, "masses") if self._masses is None: - self._masses = torch.ones(self._count, 1, 1, device=self._device) + self._masses = torch.ones(self._count, 1, device="cpu") if indices is not None: self._masses[indices] = masses else: @@ -206,13 +222,16 @@ def set_coms( """Set centers of mass of rigid bodies. Args: - coms: Tensor of shape (N, 1, 7) or (len(indices), 1, 7). + coms: Tensor of shape (N, 7) or (len(indices), 7). Must be on CPU. indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If coms tensor is on GPU. """ - coms = coms.to(self._device) + self._check_cpu_tensor(coms, "coms") if self._coms is None: - self._coms = torch.zeros(self._count, 1, 7, device=self._device) - self._coms[:, :, 6] = 1.0 + self._coms = torch.zeros(self._count, 7, device="cpu") + self._coms[:, 6] = 1.0 if indices is not None: self._coms[indices] = coms else: @@ -226,15 +245,18 @@ def set_inertias( """Set inertia tensors of rigid bodies. 
Args: - inertias: Tensor of shape (N, 1, 3, 3) or (len(indices), 1, 3, 3). + inertias: Tensor of shape (N, 9) or (len(indices), 9) - flattened 3x3 matrices. Must be on CPU. indices: Optional indices of bodies to update. + + Raises: + RuntimeError: If inertias tensor is on GPU. """ - inertias = inertias.to(self._device) + self._check_cpu_tensor(inertias, "inertias") if self._inertias is None: - self._inertias = torch.zeros(self._count, 1, 3, 3, device=self._device) - self._inertias[:, :, 0, 0] = 1.0 - self._inertias[:, :, 1, 1] = 1.0 - self._inertias[:, :, 2, 2] = 1.0 + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = 1.0 + self._inertias[:, 4] = 1.0 + self._inertias[:, 8] = 1.0 if indices is not None: self._inertias[indices] = inertias else: @@ -270,7 +292,7 @@ def set_mock_masses(self, masses: torch.Tensor) -> None: """Set mock mass data directly for testing. Args: - masses: Tensor of shape (N, 1, 1). + masses: Tensor of shape (N, 1). """ self._masses = masses.to(self._device) @@ -278,7 +300,7 @@ def set_mock_coms(self, coms: torch.Tensor) -> None: """Set mock center of mass data directly for testing. Args: - coms: Tensor of shape (N, 1, 7). + coms: Tensor of shape (N, 7). """ self._coms = coms.to(self._device) @@ -286,7 +308,7 @@ def set_mock_inertias(self, inertias: torch.Tensor) -> None: """Set mock inertia data directly for testing. Args: - inertias: Tensor of shape (N, 1, 3, 3). + inertias: Tensor of shape (N, 9) - flattened 3x3 matrices. """ self._inertias = inertias.to(self._device) @@ -310,3 +332,34 @@ def apply_forces_and_torques_at_position( is_global: Whether forces/torques are in global frame. """ pass # No-op for mock + + # -- Convenience method for benchmarking -- + + def set_random_mock_data(self) -> None: + """Set all internal state to random values for benchmarking. + + This method initializes all mock data with random values, + useful for benchmarking where the actual values don't matter. 
+ """ + # Transforms with normalized quaternions - on device + self._transforms = torch.randn(self._count, 7, device=self._device) + self._transforms[:, 3:7] = torch.nn.functional.normalize(self._transforms[:, 3:7], dim=-1) + + # Velocities and accelerations - on device + self._velocities = torch.randn(self._count, 6, device=self._device) + self._accelerations = torch.randn(self._count, 6, device=self._device) + + # Mass properties - stored on CPU (PhysX requirement) + self._masses = torch.rand(self._count, 1, device="cpu") * 10 + + # Center of mass with normalized quaternions - stored on CPU (PhysX requirement) + self._coms = torch.randn(self._count, 7, device="cpu") + self._coms[:, 3:7] = torch.nn.functional.normalize(self._coms[:, 3:7], dim=-1) + + # Inertia tensors (positive definite diagonal) - flattened (N, 9) - stored on CPU (PhysX requirement) + # Create diagonal inertia matrices and flatten + diag_values = torch.rand(self._count, 3, device="cpu") + 0.1 # Ensure positive + self._inertias = torch.zeros(self._count, 9, device="cpu") + self._inertias[:, 0] = diag_values[:, 0] # [0,0] + self._inertias[:, 4] = diag_values[:, 1] # [1,1] + self._inertias[:, 8] = diag_values[:, 2] # [2,2] From 041196d259baabe32fdb13b59a5f1acc7b129563 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 11:03:34 +0100 Subject: [PATCH 28/38] pre-commits --- .../assets/articulation/articulation.py | 15 ++++++--------- .../assets/articulation/articulation_data.py | 2 +- .../assets/rigid_object/rigid_object_data.py | 4 ++-- .../rigid_object_collection_data.py | 2 +- 4 files changed, 10 insertions(+), 13 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index c310dcfd4c3..736b8abc53e 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -19,11 +19,11 @@ from isaacsim.core.simulation_manager import SimulationManager from pxr import PhysxSchema, UsdPhysics -from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims import isaaclab.utils.math as math_utils -from isaaclab.utils.string import resolve_matching_names from isaaclab.actuators import ActuatorBase, ActuatorBaseCfg, ImplicitActuator from isaaclab.assets.articulation.base_articulation import BaseArticulation +from isaaclab.sim.utils.queries import find_first_matching_prim, get_all_matching_child_prims +from isaaclab.utils.string import resolve_matching_names, resolve_matching_names_values from isaaclab.utils.types import ArticulationActions from isaaclab.utils.version import get_isaac_sim_version from isaaclab.utils.wrench_composer import WrenchComposer @@ -31,9 +31,10 @@ from .articulation_data import ArticulationData if TYPE_CHECKING: - from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg import omni.physics.tensors.impl.api as physx + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg + # import logger logger = logging.getLogger(__name__) @@ -1822,14 +1823,10 @@ def _process_cfg(self): # -- joint state # joint pos - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_pos, self.joint_names - ) + indices_list, _, values_list = resolve_matching_names_values(self.cfg.init_state.joint_pos, self.joint_names) self.data.default_joint_pos[:, indices_list] = torch.tensor(values_list, 
device=self.device) # joint vel - indices_list, _, values_list = string_utils.resolve_matching_names_values( - self.cfg.init_state.joint_vel, self.joint_names - ) + indices_list, _, values_list = resolve_matching_names_values(self.cfg.init_state.joint_vel, self.joint_names) self.data.default_joint_vel[:, indices_list] = torch.tensor(values_list, device=self.device) """ diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index db78c19e62c..69f1831a217 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -12,9 +12,9 @@ from isaacsim.core.simulation_manager import SimulationManager -from isaaclab.utils.math import quat_apply, quat_apply_inverse, convert_quat, combine_frame_transforms, normalize from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData from isaaclab.utils.buffers import TimestampedBuffer +from isaaclab.utils.math import combine_frame_transforms, convert_quat, normalize, quat_apply, quat_apply_inverse if TYPE_CHECKING: from isaaclab.assets.articulation.articulation_view import ArticulationView diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index 645631efe24..14ad7123741 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -12,6 +12,8 @@ from isaacsim.core.simulation_manager import SimulationManager +from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData +from isaaclab.utils.buffers import TimestampedBuffer from isaaclab.utils.math import ( combine_frame_transforms, convert_quat, @@ -19,8 +21,6 @@ quat_apply, quat_apply_inverse, ) -from isaaclab.assets.rigid_object.base_rigid_object_data import BaseRigidObjectData -from isaaclab.utils.buffers import TimestampedBuffer if TYPE_CHECKING: from isaaclab.assets.rigid_object.rigid_object_view import RigidObjectView diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index 131dbb4a4c0..153f08f3f13 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -12,9 +12,9 @@ from isaacsim.core.simulation_manager import SimulationManager -from isaaclab.utils.math import normalize, convert_quat, quat_apply, quat_apply_inverse, combine_frame_transforms from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData from isaaclab.utils.buffers import TimestampedBuffer +from isaaclab.utils.math import combine_frame_transforms, convert_quat, normalize, quat_apply, quat_apply_inverse if TYPE_CHECKING: from isaaclab.assets.rigid_object_collection.rigid_object_collection_view import RigidObjectCollectionView From 2d430dc879acc10cd723711aa6eb02dde7874721 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 13:30:04 +0100 Subject: [PATCH 29/38] bumped version number --- source/isaaclab/docs/CHANGELOG.rst | 2 +- 
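[Editor's note] The `_process_cfg` hunk above resolves a pattern-keyed dictionary of default joint positions into per-joint indices and values with `resolve_matching_names_values`. The snippet below emulates that resolution with plain `re` to show the effect on the default buffers; it is an approximation of the helper's behaviour for illustration, not its actual implementation.

```python
import re

import torch

joint_names = ["left_hip", "right_hip", "left_knee", "right_knee"]
init_joint_pos = {".*_hip": 0.1, ".*_knee": -0.4}   # pattern-keyed defaults, as in an ArticulationCfg

indices, values = [], []
for pattern, value in init_joint_pos.items():
    for idx, name in enumerate(joint_names):
        if re.fullmatch(pattern, name):
            indices.append(idx)
            values.append(value)

default_joint_pos = torch.zeros(1, len(joint_names))
default_joint_pos[:, indices] = torch.tensor(values)
print(default_joint_pos)   # tensor([[ 0.1000,  0.1000, -0.4000, -0.4000]])
```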
source/isaaclab_contrib/docs/CHANGELOG.rst | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 4abb2a6ac4f..efe37edd7fb 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,7 +1,7 @@ Changelog --------- -0.54.3 (2026-01-29) +0.55.0 (2026-01-30) ~~~~~~~~~~~~~~~~~~~ Added diff --git a/source/isaaclab_contrib/docs/CHANGELOG.rst b/source/isaaclab_contrib/docs/CHANGELOG.rst index 14dd10a8104..397b9a979f8 100644 --- a/source/isaaclab_contrib/docs/CHANGELOG.rst +++ b/source/isaaclab_contrib/docs/CHANGELOG.rst @@ -1,10 +1,10 @@ Changelog --------- -0.0.3 (2026-01-29) +0.1.0 (2026-01-30) ~~~~~~~~~~~~~~~~~~ -Updated +Changed ^^^^^^^ * Updated the multirotor asset to use the new base classes from the isaaclab_physx package. From db416d9619d22d46a4bee1d99466d17622badf79 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 13:30:22 +0100 Subject: [PATCH 30/38] more precise warning. --- .../assets/articulation/articulation_data.py | 173 ++++++++---- .../assets/rigid_object/rigid_object_data.py | 131 ++++++--- .../rigid_object_collection_data.py | 263 ++++++++++++------ 3 files changed, 377 insertions(+), 190 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index ff76c2adacc..c00f9d82ec7 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -5,6 +5,7 @@ import logging import weakref +import warnings import torch @@ -178,9 +179,11 @@ def default_root_state(self) -> torch.Tensor: This quantity is configured through the :attr:`isaaclab.assets.ArticulationCfg.init_state` parameter. """ - logger.warning( - "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_root_pose and default_root_vel properties instead." + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) return torch.cat([self._default_root_pose, self._default_root_vel], dim=1) @@ -194,9 +197,11 @@ def default_root_state(self, value: torch.Tensor) -> None: Raises: ValueError: If the articulation data is already primed. """ - logger.warning( - "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_root_pose and default_root_vel properties instead." + warnings.warn( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) if self.is_primed: raise ValueError("The articulation data is already primed.") @@ -1114,222 +1119,274 @@ def _create_buffers(self) -> None: @property def root_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" - logger.warning( - "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead." + warnings.warn( + "The `root_pose_w` property will be deprecated in a future release. 
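[Editor's note] This patch swaps `logger.warning` for `warnings.warn(..., DeprecationWarning, stacklevel=2)` on the deprecated aliases, so the warning is attributed to the caller's line and can be filtered or escalated to an error by test frameworks. A minimal version of the pattern on a toy property, with illustrative names:

```python
import warnings

import torch


class Data:
    @property
    def root_link_pos_w(self) -> torch.Tensor:
        return torch.zeros(1, 3)

    @property
    def root_pos_w(self) -> torch.Tensor:
        """Deprecated alias kept for backwards compatibility."""
        warnings.warn(
            "The `root_pos_w` property will be deprecated in a future release. "
            "Please use `root_link_pos_w` instead.",
            DeprecationWarning,
            stacklevel=2,   # point the warning at the caller, not at this property
        )
        return self.root_link_pos_w


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = Data().root_pos_w
assert caught and issubclass(caught[0].category, DeprecationWarning)
```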
Please use `root_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_pose_w @property def root_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" - logger.warning( - "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead." + warnings.warn( + "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_pos_w @property def root_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" - logger.warning( - "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead." + warnings.warn( + "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_quat_w @property def root_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_vel_w` instead.""" - logger.warning( - "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead." + warnings.warn( + "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_vel_w @property def root_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `root_lin_vel_w` property will be deprecated in a future release. Please use" - " `root_com_lin_vel_w` instead." + " `root_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_lin_vel_w @property def root_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `root_ang_vel_w` property will be deprecated in a future release. Please use" - " `root_com_ang_vel_w` instead." + " `root_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_ang_vel_w @property def root_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_b` instead.""" - logger.warning( + warnings.warn( "The `root_lin_vel_b` property will be deprecated in a future release. Please use" - " `root_com_lin_vel_b` instead." + " `root_com_lin_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_lin_vel_b @property def root_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" - logger.warning( + warnings.warn( "The `root_ang_vel_b` property will be deprecated in a future release. Please use" - " `root_com_ang_vel_b` instead." + " `root_com_ang_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_ang_vel_b @property def body_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" - logger.warning( - "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead." + warnings.warn( + "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pose_w @property def body_pos_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_link_pos_w` instead.""" - logger.warning( - "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." + warnings.warn( + "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pos_w @property def body_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" - logger.warning( - "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead." + warnings.warn( + "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_quat_w @property def body_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" - logger.warning( - "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." + warnings.warn( + "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_vel_w @property def body_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `body_lin_vel_w` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_w` instead." + " `body_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_w @property def body_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `body_ang_vel_w` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_w` instead." + " `body_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_w @property def body_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" - logger.warning( - "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." + warnings.warn( + "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_acc_w @property def body_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" - logger.warning( + warnings.warn( "The `body_lin_acc_w` property will be deprecated in a future release. Please use" - " `body_com_lin_acc_w` instead." + " `body_com_lin_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_acc_w @property def body_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" - logger.warning( + warnings.warn( "The `body_ang_acc_w` property will be deprecated in a future release. Please use" - " `body_com_ang_acc_w` instead." + " `body_com_ang_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_acc_w @property def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" - logger.warning( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." + warnings.warn( + "The `com_pos_b` property will be deprecated in a future release. 
Please use `body_com_pos_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pos_b @property def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" - logger.warning( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." + warnings.warn( + "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_quat_b @property def joint_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" - logger.warning( - "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead." + warnings.warn( + "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead.", + DeprecationWarning, + stacklevel=2, ) return self.joint_pos_limits @property def default_joint_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" - logger.warning( + warnings.warn( "The `default_joint_limits` property will be deprecated in a future release. Please use" - " `default_joint_pos_limits` instead." + " `default_joint_pos_limits` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_joint_pos_limits @property def joint_velocity_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" - logger.warning( + warnings.warn( "The `joint_velocity_limits` property will be deprecated in a future release. Please use" - " `joint_vel_limits` instead." + " `joint_vel_limits` instead.", + DeprecationWarning, + stacklevel=2, ) return self.joint_vel_limits @property def joint_friction(self) -> torch.Tensor: """Deprecated property. Please use :attr:`joint_friction_coeff` instead.""" - logger.warning( + warnings.warn( "The `joint_friction` property will be deprecated in a future release. Please use" - " `joint_friction_coeff` instead." + " `joint_friction_coeff` instead.", + DeprecationWarning, + stacklevel=2, ) return self.joint_friction_coeff @property def default_joint_friction(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" - logger.warning( + warnings.warn( "The `default_joint_friction` property will be deprecated in a future release. Please use" - " `default_joint_friction_coeff` instead." + " `default_joint_friction_coeff` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_joint_friction_coeff @property def fixed_tendon_limit(self) -> torch.Tensor: """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" - logger.warning( + warnings.warn( "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `fixed_tendon_pos_limits` instead." + " `fixed_tendon_pos_limits` instead.", + DeprecationWarning, + stacklevel=2, ) return self.fixed_tendon_pos_limits @property def default_fixed_tendon_limit(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" - logger.warning( + warnings.warn( "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" - " `default_fixed_tendon_pos_limits` instead." 
+ " `default_fixed_tendon_pos_limits` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_fixed_tendon_pos_limits diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index f5b104e5ae0..5ef43742fd8 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause import logging +import warnings import weakref import torch @@ -167,9 +168,11 @@ def default_root_state(self) -> torch.Tensor: The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are of the center of mass frame. Shape is (num_instances, 13). """ - logger.warning( - "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_root_pose and default_root_vel properties instead." + warnings.warn( + "Reading the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) return torch.cat([self.default_root_pose, self.default_root_vel], dim=1) @@ -183,9 +186,11 @@ def default_root_state(self, value: torch.Tensor) -> None: Raises: ValueError: If the rigid object data is already primed. """ - logger.warning( - "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_root_pose and default_root_vel properties instead." + warnings.warn( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) if self._is_primed: raise ValueError("The rigid object data is already primed.") @@ -670,160 +675,198 @@ def _create_buffers(self) -> None: @property def root_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" - logger.warning( - "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead." + warnings.warn( + "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_pose_w @property def root_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" - logger.warning( - "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead." + warnings.warn( + "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_pos_w @property def root_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" - logger.warning( - "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead." + warnings.warn( + "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_link_quat_w @property def root_vel_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`root_com_vel_w` instead.""" - logger.warning( - "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead." + warnings.warn( + "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_vel_w @property def root_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `root_lin_vel_w` property will be deprecated in a future release. Please use" - " `root_com_lin_vel_w` instead." + " `root_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_lin_vel_w @property def root_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `root_ang_vel_w` property will be deprecated in a future release. Please use" - " `root_com_ang_vel_w` instead." + " `root_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_ang_vel_w @property def root_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_b` instead.""" - logger.warning( + warnings.warn( "The `root_lin_vel_b` property will be deprecated in a future release. Please use" - " `root_com_lin_vel_b` instead." + " `root_com_lin_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_lin_vel_b @property def root_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" - logger.warning( + warnings.warn( "The `root_ang_vel_b` property will be deprecated in a future release. Please use" - " `root_com_ang_vel_b` instead." + " `root_com_ang_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.root_com_ang_vel_b @property def body_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" - logger.warning( - "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead." + warnings.warn( + "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pose_w @property def body_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" - logger.warning( - "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." + warnings.warn( + "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pos_w @property def body_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" - logger.warning( - "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead." + warnings.warn( + "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_quat_w @property def body_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" - logger.warning( - "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." 
+ warnings.warn( + "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_vel_w @property def body_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `body_lin_vel_w` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_w` instead." + " `body_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_w @property def body_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `body_ang_vel_w` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_w` instead." + " `body_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_w @property def body_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" - logger.warning( - "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." + warnings.warn( + "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_acc_w @property def body_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" - logger.warning( + warnings.warn( "The `body_lin_acc_w` property will be deprecated in a future release. Please use" - " `body_com_lin_acc_w` instead." + " `body_com_lin_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_acc_w @property def body_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" - logger.warning( + warnings.warn( "The `body_ang_acc_w` property will be deprecated in a future release. Please use" - " `body_com_ang_acc_w` instead." + " `body_com_ang_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_acc_w @property def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" - logger.warning( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." + warnings.warn( + "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pos_b @property def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" - logger.warning( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." + warnings.warn( + "The `com_quat_b` property will be deprecated in a future release. 
Please use `body_com_quat_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_quat_b diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index ffc8e8beb07..f98c3688780 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -4,6 +4,7 @@ # SPDX-License-Identifier: BSD-3-Clause import logging +import warnings import weakref import torch @@ -173,9 +174,11 @@ def default_body_state(self) -> torch.Tensor: The position and quaternion are of the rigid body's actor frame. Meanwhile, the linear and angular velocities are of the center of mass frame. Shape is (num_instances, num_bodies, 13). """ - logger.warning( - "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_body_pose and default_body_vel properties instead." + warnings.warn( + "Reading the body state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_body_pose and default_body_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) return torch.cat([self.default_body_pose, self.default_body_vel], dim=-1) @@ -189,9 +192,11 @@ def default_body_state(self, value: torch.Tensor) -> None: Raises: ValueError: If the rigid object collection data is already primed. """ - logger.warning( - "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. \ - Please use the default_root_pose and default_root_vel properties instead." + warnings.warn( + "Setting the root state directly is deprecated since IsaacLab 3.0 and will be removed in a future version. " + "Please use the default_root_pose and default_root_vel properties instead.", + DeprecationWarning, + stacklevel=2, ) if self._is_primed: raise ValueError("The rigid object collection data is already primed.") @@ -543,363 +548,445 @@ def _create_buffers(self) -> None: @property def default_object_pose(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_pose` instead.""" - logger.warning( + warnings.warn( "The `default_object_pose` property will be deprecated in a future release. Please use" - " `default_body_pose` instead." + " `default_body_pose` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_body_pose @property def default_object_vel(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_vel` instead.""" - logger.warning( + warnings.warn( "The `default_object_vel` property will be deprecated in a future release. Please use" - " `default_body_vel` instead." + " `default_body_vel` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_body_vel @property def default_object_state(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_state` instead.""" - logger.warning( + warnings.warn( "The `default_object_state` property will be deprecated in a future release. Please use" - " `default_body_state` instead." + " `default_body_state` instead.", + DeprecationWarning, + stacklevel=2, ) return self.default_body_state @property def object_link_pose_w(self): """Deprecated property. 
Please use :attr:`body_link_pose_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_pose_w` property will be deprecated in a future release. Please use" - " `body_link_pose_w` instead." + " `body_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pose_w @property def object_link_vel_w(self): """Deprecated property. Please use :attr:`body_link_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_vel_w` property will be deprecated in a future release. Please use" - " `body_link_vel_w` instead." + " `body_link_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_vel_w @property def object_com_pose_w(self): """Deprecated property. Please use :attr:`body_com_pose_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_pose_w` property will be deprecated in a future release. Please use" - " `body_com_pose_w` instead." + " `body_com_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pose_w @property def object_com_vel_w(self): """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_vel_w` property will be deprecated in a future release. Please use" - " `body_com_vel_w` instead." + " `body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_vel_w @property def object_state_w(self): """Deprecated property. Please use :attr:`body_state_w` instead.""" - logger.warning( - "The `object_state_w` property will be deprecated in a future release. Please use `body_state_w` instead." + warnings.warn( + "The `object_state_w` property will be deprecated in a future release. Please use `body_state_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_state_w @property def object_link_state_w(self): """Deprecated property. Please use :attr:`body_link_state_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_state_w` property will be deprecated in a future release. Please use" - " `body_link_state_w` instead." + " `body_link_state_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_state_w @property def object_com_state_w(self): """Deprecated property. Please use :attr:`body_com_state_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_state_w` property will be deprecated in a future release. Please use" - " `body_com_state_w` instead." + " `body_com_state_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_state_w @property def object_com_acc_w(self): """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_acc_w` property will be deprecated in a future release. Please use" - " `body_com_acc_w` instead." + " `body_com_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_acc_w @property def object_com_pose_b(self): """Deprecated property. Please use :attr:`body_com_pose_b` instead.""" - logger.warning( + warnings.warn( "The `object_com_pose_b` property will be deprecated in a future release. Please use" - " `body_com_pose_b` instead." + " `body_com_pose_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pose_b @property def object_link_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_pos_w` property will be deprecated in a future release. 
Please use" - " `body_link_pos_w` instead." + " `body_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pos_w @property def object_link_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_quat_w` property will be deprecated in a future release. Please use" - " `body_link_quat_w` instead." + " `body_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_quat_w @property def object_link_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_lin_vel_w` property will be deprecated in a future release. Please use" - " `body_link_lin_vel_w` instead." + " `body_link_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_lin_vel_w @property def object_link_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_link_ang_vel_w` property will be deprecated in a future release. Please use" - " `body_link_ang_vel_w` instead." + " `body_link_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_ang_vel_w @property def object_com_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_pos_w` property will be deprecated in a future release. Please use" - " `body_com_pos_w` instead." + " `body_com_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pos_w @property def object_com_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_quat_w` property will be deprecated in a future release. Please use" - " `body_com_quat_w` instead." + " `body_com_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_quat_w @property def object_com_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_lin_vel_w` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_w` instead." + " `body_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_w @property def object_com_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_ang_vel_w` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_w` instead." + " `body_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_w @property def object_com_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_lin_acc_w` property will be deprecated in a future release. Please use" - " `body_com_lin_acc_w` instead." + " `body_com_lin_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_acc_w @property def object_com_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" - logger.warning( + warnings.warn( "The `object_com_ang_acc_w` property will be deprecated in a future release. 
Please use" - " `body_com_ang_acc_w` instead." + " `body_com_ang_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_acc_w @property def object_com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" - logger.warning( + warnings.warn( "The `object_com_pos_b` property will be deprecated in a future release. Please use" - " `body_com_pos_b` instead." + " `body_com_pos_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pos_b @property def object_com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" - logger.warning( + warnings.warn( "The `object_com_quat_b` property will be deprecated in a future release. Please use" - " `body_com_quat_b` instead." + " `body_com_quat_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_quat_b @property def object_link_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_lin_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_link_lin_vel_b` property will be deprecated in a future release. Please use" - " `body_link_lin_vel_b` instead." + " `body_link_lin_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_lin_vel_b @property def object_link_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_ang_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_link_ang_vel_b` property will be deprecated in a future release. Please use" - " `body_link_ang_vel_b` instead." + " `body_link_ang_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_ang_vel_b @property def object_com_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_com_lin_vel_b` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_b` instead." + " `body_com_lin_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_b @property def object_com_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_com_ang_vel_b` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_b` instead." + " `body_com_ang_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_b @property def object_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" - logger.warning( + warnings.warn( "The `object_pose_w` property will be deprecated in a future release. Please use" - " `body_link_pose_w` instead." + " `body_link_pose_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pose_w @property def object_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" - logger.warning( - "The `object_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead." + warnings.warn( + "The `object_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_pos_w @property def object_quat_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_link_quat_w` instead.""" - logger.warning( + warnings.warn( "The `object_quat_w` property will be deprecated in a future release. Please use" - " `body_link_quat_w` instead." + " `body_link_quat_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_link_quat_w @property def object_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" - logger.warning( - "The `object_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead." + warnings.warn( + "The `object_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_vel_w @property def object_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_lin_vel_w` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_w` instead." + " `body_com_lin_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_w @property def object_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" - logger.warning( + warnings.warn( "The `object_ang_vel_w` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_w` instead." + " `body_com_ang_vel_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_w @property def object_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_lin_vel_b` property will be deprecated in a future release. Please use" - " `body_com_lin_vel_b` instead." + " `body_com_lin_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_vel_b @property def object_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" - logger.warning( + warnings.warn( "The `object_ang_vel_b` property will be deprecated in a future release. Please use" - " `body_com_ang_vel_b` instead." + " `body_com_ang_vel_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_vel_b @property def object_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" - logger.warning( - "The `object_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead." + warnings.warn( + "The `object_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_acc_w @property def object_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" - logger.warning( + warnings.warn( "The `object_lin_acc_w` property will be deprecated in a future release. Please use" - " `body_com_lin_acc_w` instead." + " `body_com_lin_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_lin_acc_w @property def object_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" - logger.warning( + warnings.warn( "The `object_ang_acc_w` property will be deprecated in a future release. Please use" - " `body_com_ang_acc_w` instead." 
+ " `body_com_ang_acc_w` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_ang_acc_w @property def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" - logger.warning( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead." + warnings.warn( + "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_pos_b @property def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" - logger.warning( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead." + warnings.warn( + "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead.", + DeprecationWarning, + stacklevel=2, ) return self.body_com_quat_b From ca95f90029e70c4d8f7ec01fc0b9d17f2a9797d8 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 13:41:21 +0100 Subject: [PATCH 31/38] bumped the version in the extension.toml --- source/isaaclab/config/extension.toml | 2 +- source/isaaclab_contrib/config/extension.toml | 2 +- source/isaaclab_tasks/config/extension.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index 9939dcb511b..09e7ca6f88a 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.54.2" +version = "0.55.0" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab_contrib/config/extension.toml b/source/isaaclab_contrib/config/extension.toml index 326c9faf7ee..8831abdfffb 100644 --- a/source/isaaclab_contrib/config/extension.toml +++ b/source/isaaclab_contrib/config/extension.toml @@ -1,6 +1,6 @@ [package] # Semantic Versioning is used: https://semver.org/ -version = "0.0.2" +version = "0.1.0" # Description title = "Isaac Lab External Contributions" diff --git a/source/isaaclab_tasks/config/extension.toml b/source/isaaclab_tasks/config/extension.toml index ea211a93e2a..67f725c8986 100644 --- a/source/isaaclab_tasks/config/extension.toml +++ b/source/isaaclab_tasks/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.11.12" +version = "0.11.13" # Description title = "Isaac Lab Environments" From 46324c374af1e3f2f43e36354af0bc3ab8ecf580 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 13:46:20 +0100 Subject: [PATCH 32/38] made the warning much more explicit in terms of the deprecation date. This way we actually do it!
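For reference, this patch only rewords the messages; the mechanism is the standard-library deprecation pattern introduced in the previous patch: `warnings.warn(..., DeprecationWarning, stacklevel=2)` attributes the warning to the caller's line and lets downstream users filter it, or escalate it to a hard error, through the normal warnings machinery. A minimal, self-contained sketch of that pattern (the `_Demo` class and its properties are illustrative placeholders, not IsaacLab APIs):

    import warnings


    class _Demo:
        """Illustrative stand-in mirroring the deprecated-property pattern used in the data classes."""

        @property
        def new_value(self) -> int:
            return 42

        @property
        def old_value(self) -> int:
            # stacklevel=2 points the warning at the caller rather than at this property,
            # and DeprecationWarning lets users silence or escalate it via warnings filters.
            warnings.warn(
                "The `old_value` property is deprecated. Please use `new_value` instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return self.new_value


    if __name__ == "__main__":
        warnings.simplefilter("always", DeprecationWarning)  # make the warning visible here
        print(_Demo().old_value)

Running with `python -W error::DeprecationWarning` turns any access to the deprecated aliases into a failure, which is what makes the planned removal enforceable.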
--- .../assets/articulation/articulation_data.py | 54 ++++++------ .../assets/rigid_object/rigid_object_data.py | 38 ++++----- .../rigid_object_collection_data.py | 84 +++++++++---------- 3 files changed, 87 insertions(+), 89 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index c00f9d82ec7..aff69dd3775 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -4,8 +4,8 @@ # SPDX-License-Identifier: BSD-3-Clause import logging -import weakref import warnings +import weakref import torch @@ -1120,7 +1120,7 @@ def _create_buffers(self) -> None: def root_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" warnings.warn( - "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead.", + "The `root_pose_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pose_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1130,7 +1130,7 @@ def root_pose_w(self) -> torch.Tensor: def root_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" warnings.warn( - "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead.", + "The `root_pos_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_pos_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1140,7 +1140,7 @@ def root_pos_w(self) -> torch.Tensor: def root_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" warnings.warn( - "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead.", + "The `root_quat_w` property will be deprecated in a IsaacLab 4.0. Please use `root_link_quat_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1150,7 +1150,7 @@ def root_quat_w(self) -> torch.Tensor: def root_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_vel_w` instead.""" warnings.warn( - "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead.", + "The `root_vel_w` property will be deprecated in a IsaacLab 4.0. Please use `root_com_vel_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1160,7 +1160,7 @@ def root_vel_w(self) -> torch.Tensor: def root_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" warnings.warn( - "The `root_lin_vel_w` property will be deprecated in a future release. Please use" + "The `root_lin_vel_w` property will be deprecated in a IsaacLab 4.0. Please use" " `root_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -1171,7 +1171,7 @@ def root_lin_vel_w(self) -> torch.Tensor: def root_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" warnings.warn( - "The `root_ang_vel_w` property will be deprecated in a future release. Please use" + "The `root_ang_vel_w` property will be deprecated in a IsaacLab 4.0. Please use" " `root_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -1182,7 +1182,7 @@ def root_ang_vel_w(self) -> torch.Tensor: def root_lin_vel_b(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`root_com_lin_vel_b` instead.""" warnings.warn( - "The `root_lin_vel_b` property will be deprecated in a future release. Please use" + "The `root_lin_vel_b` property will be deprecated in a IsaacLab 4.0. Please use" " `root_com_lin_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -1193,7 +1193,7 @@ def root_lin_vel_b(self) -> torch.Tensor: def root_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" warnings.warn( - "The `root_ang_vel_b` property will be deprecated in a future release. Please use" + "The `root_ang_vel_b` property will be deprecated in a IsaacLab 4.0. Please use" " `root_com_ang_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -1204,7 +1204,7 @@ def root_ang_vel_b(self) -> torch.Tensor: def body_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( - "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead.", + "The `body_pose_w` property will be deprecated in a IsaacLab 4.0. Please use `body_link_pose_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1214,7 +1214,7 @@ def body_pose_w(self) -> torch.Tensor: def body_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( - "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + "The `body_pos_w` property will be deprecated in a IsaacLab 4.0. Please use `body_link_pos_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1224,7 +1224,7 @@ def body_pos_w(self) -> torch.Tensor: def body_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( - "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead.", + "The `body_quat_w` property will be deprecated in a IsaacLab 4.0. Please use `body_link_quat_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1234,7 +1234,7 @@ def body_quat_w(self) -> torch.Tensor: def body_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( - "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + "The `body_vel_w` property will be deprecated in a IsaacLab 4.0. Please use `body_com_vel_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1244,7 +1244,7 @@ def body_vel_w(self) -> torch.Tensor: def body_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( - "The `body_lin_vel_w` property will be deprecated in a future release. Please use" + "The `body_lin_vel_w` property will be deprecated in a IsaacLab 4.0. Please use" " `body_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -1255,7 +1255,7 @@ def body_lin_vel_w(self) -> torch.Tensor: def body_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( - "The `body_ang_vel_w` property will be deprecated in a future release. Please use" + "The `body_ang_vel_w` property will be deprecated in a IsaacLab 4.0. Please use" " `body_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -1266,7 +1266,7 @@ def body_ang_vel_w(self) -> torch.Tensor: def body_acc_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_com_acc_w` instead.""" warnings.warn( - "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + "The `body_acc_w` property will be deprecated in a IsaacLab 4.0. Please use `body_com_acc_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -1276,7 +1276,7 @@ def body_acc_w(self) -> torch.Tensor: def body_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( - "The `body_lin_acc_w` property will be deprecated in a future release. Please use" + "The `body_lin_acc_w` property will be deprecated in a IsaacLab 4.0. Please use" " `body_com_lin_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -1287,7 +1287,7 @@ def body_lin_acc_w(self) -> torch.Tensor: def body_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( - "The `body_ang_acc_w` property will be deprecated in a future release. Please use" + "The `body_ang_acc_w` property will be deprecated in a IsaacLab 4.0. Please use" " `body_com_ang_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -1298,7 +1298,7 @@ def body_ang_acc_w(self) -> torch.Tensor: def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" warnings.warn( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead.", + "The `com_pos_b` property will be deprecated in a IsaacLab 4.0. Please use `body_com_pos_b` instead.", DeprecationWarning, stacklevel=2, ) @@ -1308,7 +1308,7 @@ def com_pos_b(self) -> torch.Tensor: def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead.", + "The `com_quat_b` property will be deprecated in a IsaacLab 4.0. Please use `body_com_quat_b` instead.", DeprecationWarning, stacklevel=2, ) @@ -1318,7 +1318,7 @@ def com_quat_b(self) -> torch.Tensor: def joint_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`joint_pos_limits` instead.""" warnings.warn( - "The `joint_limits` property will be deprecated in a future release. Please use `joint_pos_limits` instead.", + "The `joint_limits` property will be deprecated in a IsaacLab 4.0. Please use `joint_pos_limits` instead.", DeprecationWarning, stacklevel=2, ) @@ -1328,7 +1328,7 @@ def joint_limits(self) -> torch.Tensor: def default_joint_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_joint_pos_limits` instead.""" warnings.warn( - "The `default_joint_limits` property will be deprecated in a future release. Please use" + "The `default_joint_limits` property will be deprecated in a IsaacLab 4.0. Please use" " `default_joint_pos_limits` instead.", DeprecationWarning, stacklevel=2, @@ -1339,7 +1339,7 @@ def default_joint_limits(self) -> torch.Tensor: def joint_velocity_limits(self) -> torch.Tensor: """Deprecated property. Please use :attr:`joint_vel_limits` instead.""" warnings.warn( - "The `joint_velocity_limits` property will be deprecated in a future release. Please use" + "The `joint_velocity_limits` property will be deprecated in a IsaacLab 4.0. Please use" " `joint_vel_limits` instead.", DeprecationWarning, stacklevel=2, @@ -1350,7 +1350,7 @@ def joint_velocity_limits(self) -> torch.Tensor: def joint_friction(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`joint_friction_coeff` instead.""" warnings.warn( - "The `joint_friction` property will be deprecated in a future release. Please use" + "The `joint_friction` property will be deprecated in IsaacLab 4.0. Please use" " `joint_friction_coeff` instead.", DeprecationWarning, stacklevel=2, @@ -1361,7 +1361,7 @@ def joint_friction(self) -> torch.Tensor: def default_joint_friction(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_joint_friction_coeff` instead.""" warnings.warn( - "The `default_joint_friction` property will be deprecated in a future release. Please use" + "The `default_joint_friction` property will be deprecated in IsaacLab 4.0. Please use" " `default_joint_friction_coeff` instead.", DeprecationWarning, stacklevel=2, @@ -1372,7 +1372,7 @@ def default_joint_friction(self) -> torch.Tensor: def fixed_tendon_limit(self) -> torch.Tensor: """Deprecated property. Please use :attr:`fixed_tendon_pos_limits` instead.""" warnings.warn( - "The `fixed_tendon_limit` property will be deprecated in a future release. Please use" + "The `fixed_tendon_limit` property will be deprecated in IsaacLab 4.0. Please use" " `fixed_tendon_pos_limits` instead.", DeprecationWarning, stacklevel=2, @@ -1383,7 +1383,7 @@ def fixed_tendon_limit(self) -> torch.Tensor: def default_fixed_tendon_limit(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_fixed_tendon_pos_limits` instead.""" warnings.warn( - "The `default_fixed_tendon_limit` property will be deprecated in a future release. Please use" + "The `default_fixed_tendon_limit` property will be deprecated in IsaacLab 4.0. Please use" " `default_fixed_tendon_pos_limits` instead.", DeprecationWarning, stacklevel=2, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index 5ef43742fd8..1d87d3335e6 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -676,7 +676,7 @@ def _create_buffers(self) -> None: def root_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pose_w` instead.""" warnings.warn( - "The `root_pose_w` property will be deprecated in a future release. Please use `root_link_pose_w` instead.", + "The `root_pose_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pose_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -686,7 +686,7 @@ def root_pose_w(self) -> torch.Tensor: def root_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_pos_w` instead.""" warnings.warn( - "The `root_pos_w` property will be deprecated in a future release. Please use `root_link_pos_w` instead.", + "The `root_pos_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_pos_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -696,7 +696,7 @@ def root_pos_w(self) -> torch.Tensor: def root_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_link_quat_w` instead.""" warnings.warn( - "The `root_quat_w` property will be deprecated in a future release. Please use `root_link_quat_w` instead.", + "The `root_quat_w` property will be deprecated in IsaacLab 4.0. Please use `root_link_quat_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -706,7 +706,7 @@ def root_quat_w(self) -> torch.Tensor: def root_vel_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`root_com_vel_w` instead.""" warnings.warn( - "The `root_vel_w` property will be deprecated in a future release. Please use `root_com_vel_w` instead.", + "The `root_vel_w` property will be deprecated in IsaacLab 4.0. Please use `root_com_vel_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -716,7 +716,7 @@ def root_vel_w(self) -> torch.Tensor: def root_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_w` instead.""" warnings.warn( - "The `root_lin_vel_w` property will be deprecated in a future release. Please use" + "The `root_lin_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `root_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -727,7 +727,7 @@ def root_lin_vel_w(self) -> torch.Tensor: def root_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_w` instead.""" warnings.warn( - "The `root_ang_vel_w` property will be deprecated in a future release. Please use" + "The `root_ang_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `root_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -738,7 +738,7 @@ def root_ang_vel_w(self) -> torch.Tensor: def root_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_lin_vel_b` instead.""" warnings.warn( - "The `root_lin_vel_b` property will be deprecated in a future release. Please use" + "The `root_lin_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `root_com_lin_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -749,7 +749,7 @@ def root_lin_vel_b(self) -> torch.Tensor: def root_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`root_com_ang_vel_b` instead.""" warnings.warn( - "The `root_ang_vel_b` property will be deprecated in a future release. Please use" + "The `root_ang_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `root_com_ang_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -760,7 +760,7 @@ def root_ang_vel_b(self) -> torch.Tensor: def body_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( - "The `body_pose_w` property will be deprecated in a future release. Please use `body_link_pose_w` instead.", + "The `body_pose_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -770,7 +770,7 @@ def body_pose_w(self) -> torch.Tensor: def body_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( - "The `body_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + "The `body_pos_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pos_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -780,7 +780,7 @@ def body_pos_w(self) -> torch.Tensor: def body_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( - "The `body_quat_w` property will be deprecated in a future release. Please use `body_link_quat_w` instead.", + "The `body_quat_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_quat_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -790,7 +790,7 @@ def body_quat_w(self) -> torch.Tensor: def body_vel_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_com_vel_w` instead.""" warnings.warn( - "The `body_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + "The `body_vel_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_vel_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -800,7 +800,7 @@ def body_vel_w(self) -> torch.Tensor: def body_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( - "The `body_lin_vel_w` property will be deprecated in a future release. Please use" + "The `body_lin_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -811,7 +811,7 @@ def body_lin_vel_w(self) -> torch.Tensor: def body_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( - "The `body_ang_vel_w` property will be deprecated in a future release. Please use" + "The `body_ang_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -822,7 +822,7 @@ def body_ang_vel_w(self) -> torch.Tensor: def body_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( - "The `body_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + "The `body_acc_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_acc_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -832,7 +832,7 @@ def body_acc_w(self) -> torch.Tensor: def body_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( - "The `body_lin_acc_w` property will be deprecated in a future release. Please use" + "The `body_lin_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -843,7 +843,7 @@ def body_lin_acc_w(self) -> torch.Tensor: def body_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( - "The `body_ang_acc_w` property will be deprecated in a future release. Please use" + "The `body_ang_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -854,7 +854,7 @@ def body_ang_acc_w(self) -> torch.Tensor: def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" warnings.warn( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead.", + "The `com_pos_b` property will be deprecated in IsaacLab 4.0. Please use `body_com_pos_b` instead.", DeprecationWarning, stacklevel=2, ) @@ -864,7 +864,7 @@ def com_pos_b(self) -> torch.Tensor: def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead.", + "The `com_quat_b` property will be deprecated in IsaacLab 4.0. 
Please use `body_com_quat_b` instead.", DeprecationWarning, stacklevel=2, ) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index f98c3688780..eff287a7259 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -549,7 +549,7 @@ def _create_buffers(self) -> None: def default_object_pose(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_pose` instead.""" warnings.warn( - "The `default_object_pose` property will be deprecated in a future release. Please use" + "The `default_object_pose` property will be deprecated in IsaacLab 4.0. Please use" " `default_body_pose` instead.", DeprecationWarning, stacklevel=2, @@ -560,7 +560,7 @@ def default_object_pose(self) -> torch.Tensor: def default_object_vel(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_vel` instead.""" warnings.warn( - "The `default_object_vel` property will be deprecated in a future release. Please use" + "The `default_object_vel` property will be deprecated in IsaacLab 4.0. Please use" " `default_body_vel` instead.", DeprecationWarning, stacklevel=2, @@ -571,7 +571,7 @@ def default_object_vel(self) -> torch.Tensor: def default_object_state(self) -> torch.Tensor: """Deprecated property. Please use :attr:`default_body_state` instead.""" warnings.warn( - "The `default_object_state` property will be deprecated in a future release. Please use" + "The `default_object_state` property will be deprecated in IsaacLab 4.0. Please use" " `default_body_state` instead.", DeprecationWarning, stacklevel=2, @@ -582,7 +582,7 @@ def default_object_state(self) -> torch.Tensor: def object_link_pose_w(self): """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( - "The `object_link_pose_w` property will be deprecated in a future release. Please use" + "The `object_link_pose_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_pose_w` instead.", DeprecationWarning, stacklevel=2, @@ -593,7 +593,7 @@ def object_link_pose_w(self): def object_link_vel_w(self): """Deprecated property. Please use :attr:`body_link_vel_w` instead.""" warnings.warn( - "The `object_link_vel_w` property will be deprecated in a future release. Please use" + "The `object_link_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -604,7 +604,7 @@ def object_link_vel_w(self): def object_com_pose_w(self): """Deprecated property. Please use :attr:`body_com_pose_w` instead.""" warnings.warn( - "The `object_com_pose_w` property will be deprecated in a future release. Please use" + "The `object_com_pose_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_pose_w` instead.", DeprecationWarning, stacklevel=2, @@ -615,7 +615,7 @@ def object_com_pose_w(self): def object_com_vel_w(self): """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( - "The `object_com_vel_w` property will be deprecated in a future release. Please use" + "The `object_com_vel_w` property will be deprecated in IsaacLab 4.0. 
Please use" " `body_com_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -626,7 +626,7 @@ def object_com_vel_w(self): def object_state_w(self): """Deprecated property. Please use :attr:`body_state_w` instead.""" warnings.warn( - "The `object_state_w` property will be deprecated in a future release. Please use `body_state_w` instead.", + "The `object_state_w` property will be deprecated in IsaacLab 4.0. Please use `body_state_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -636,7 +636,7 @@ def object_state_w(self): def object_link_state_w(self): """Deprecated property. Please use :attr:`body_link_state_w` instead.""" warnings.warn( - "The `object_link_state_w` property will be deprecated in a future release. Please use" + "The `object_link_state_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_state_w` instead.", DeprecationWarning, stacklevel=2, @@ -647,7 +647,7 @@ def object_link_state_w(self): def object_com_state_w(self): """Deprecated property. Please use :attr:`body_com_state_w` instead.""" warnings.warn( - "The `object_com_state_w` property will be deprecated in a future release. Please use" + "The `object_com_state_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_state_w` instead.", DeprecationWarning, stacklevel=2, @@ -658,7 +658,7 @@ def object_com_state_w(self): def object_com_acc_w(self): """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( - "The `object_com_acc_w` property will be deprecated in a future release. Please use" + "The `object_com_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -669,7 +669,7 @@ def object_com_acc_w(self): def object_com_pose_b(self): """Deprecated property. Please use :attr:`body_com_pose_b` instead.""" warnings.warn( - "The `object_com_pose_b` property will be deprecated in a future release. Please use" + "The `object_com_pose_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_pose_b` instead.", DeprecationWarning, stacklevel=2, @@ -680,7 +680,7 @@ def object_com_pose_b(self): def object_link_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( - "The `object_link_pos_w` property will be deprecated in a future release. Please use" + "The `object_link_pos_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_pos_w` instead.", DeprecationWarning, stacklevel=2, @@ -691,7 +691,7 @@ def object_link_pos_w(self) -> torch.Tensor: def object_link_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_quat_w` instead.""" warnings.warn( - "The `object_link_quat_w` property will be deprecated in a future release. Please use" + "The `object_link_quat_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_quat_w` instead.", DeprecationWarning, stacklevel=2, @@ -702,7 +702,7 @@ def object_link_quat_w(self) -> torch.Tensor: def object_link_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_lin_vel_w` instead.""" warnings.warn( - "The `object_link_lin_vel_w` property will be deprecated in a future release. Please use" + "The `object_link_lin_vel_w` property will be deprecated in IsaacLab 4.0. 
Please use" " `body_link_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -713,7 +713,7 @@ def object_link_lin_vel_w(self) -> torch.Tensor: def object_link_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_ang_vel_w` instead.""" warnings.warn( - "The `object_link_ang_vel_w` property will be deprecated in a future release. Please use" + "The `object_link_ang_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -724,7 +724,7 @@ def object_link_ang_vel_w(self) -> torch.Tensor: def object_com_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_w` instead.""" warnings.warn( - "The `object_com_pos_w` property will be deprecated in a future release. Please use" + "The `object_com_pos_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_pos_w` instead.", DeprecationWarning, stacklevel=2, @@ -735,7 +735,7 @@ def object_com_pos_w(self) -> torch.Tensor: def object_com_quat_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_w` instead.""" warnings.warn( - "The `object_com_quat_w` property will be deprecated in a future release. Please use" + "The `object_com_quat_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_quat_w` instead.", DeprecationWarning, stacklevel=2, @@ -746,7 +746,7 @@ def object_com_quat_w(self) -> torch.Tensor: def object_com_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( - "The `object_com_lin_vel_w` property will be deprecated in a future release. Please use" + "The `object_com_lin_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -757,7 +757,7 @@ def object_com_lin_vel_w(self) -> torch.Tensor: def object_com_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( - "The `object_com_ang_vel_w` property will be deprecated in a future release. Please use" + "The `object_com_ang_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -768,7 +768,7 @@ def object_com_ang_vel_w(self) -> torch.Tensor: def object_com_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( - "The `object_com_lin_acc_w` property will be deprecated in a future release. Please use" + "The `object_com_lin_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -779,7 +779,7 @@ def object_com_lin_acc_w(self) -> torch.Tensor: def object_com_ang_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( - "The `object_com_ang_acc_w` property will be deprecated in a future release. Please use" + "The `object_com_ang_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -790,7 +790,7 @@ def object_com_ang_acc_w(self) -> torch.Tensor: def object_com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" warnings.warn( - "The `object_com_pos_b` property will be deprecated in a future release. 
Please use" + "The `object_com_pos_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_pos_b` instead.", DeprecationWarning, stacklevel=2, @@ -801,7 +801,7 @@ def object_com_pos_b(self) -> torch.Tensor: def object_com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( - "The `object_com_quat_b` property will be deprecated in a future release. Please use" + "The `object_com_quat_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_quat_b` instead.", DeprecationWarning, stacklevel=2, @@ -812,7 +812,7 @@ def object_com_quat_b(self) -> torch.Tensor: def object_link_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_lin_vel_b` instead.""" warnings.warn( - "The `object_link_lin_vel_b` property will be deprecated in a future release. Please use" + "The `object_link_lin_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_lin_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -823,7 +823,7 @@ def object_link_lin_vel_b(self) -> torch.Tensor: def object_link_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_ang_vel_b` instead.""" warnings.warn( - "The `object_link_ang_vel_b` property will be deprecated in a future release. Please use" + "The `object_link_ang_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_link_ang_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -834,7 +834,7 @@ def object_link_ang_vel_b(self) -> torch.Tensor: def object_com_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( - "The `object_com_lin_vel_b` property will be deprecated in a future release. Please use" + "The `object_com_lin_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -845,7 +845,7 @@ def object_com_lin_vel_b(self) -> torch.Tensor: def object_com_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( - "The `object_com_ang_vel_b` property will be deprecated in a future release. Please use" + "The `object_com_ang_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -856,8 +856,7 @@ def object_com_ang_vel_b(self) -> torch.Tensor: def object_pose_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pose_w` instead.""" warnings.warn( - "The `object_pose_w` property will be deprecated in a future release. Please use" - " `body_link_pose_w` instead.", + "The `object_pose_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pose_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -867,7 +866,7 @@ def object_pose_w(self) -> torch.Tensor: def object_pos_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_link_pos_w` instead.""" warnings.warn( - "The `object_pos_w` property will be deprecated in a future release. Please use `body_link_pos_w` instead.", + "The `object_pos_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_pos_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -877,8 +876,7 @@ def object_pos_w(self) -> torch.Tensor: def object_quat_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_link_quat_w` instead.""" warnings.warn( - "The `object_quat_w` property will be deprecated in a future release. Please use" - " `body_link_quat_w` instead.", + "The `object_quat_w` property will be deprecated in IsaacLab 4.0. Please use `body_link_quat_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -888,7 +886,7 @@ def object_quat_w(self) -> torch.Tensor: def object_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_vel_w` instead.""" warnings.warn( - "The `object_vel_w` property will be deprecated in a future release. Please use `body_com_vel_w` instead.", + "The `object_vel_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_vel_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -898,7 +896,7 @@ def object_vel_w(self) -> torch.Tensor: def object_lin_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_w` instead.""" warnings.warn( - "The `object_lin_vel_w` property will be deprecated in a future release. Please use" + "The `object_lin_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -909,7 +907,7 @@ def object_lin_vel_w(self) -> torch.Tensor: def object_ang_vel_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_w` instead.""" warnings.warn( - "The `object_ang_vel_w` property will be deprecated in a future release. Please use" + "The `object_ang_vel_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_vel_w` instead.", DeprecationWarning, stacklevel=2, @@ -920,7 +918,7 @@ def object_ang_vel_w(self) -> torch.Tensor: def object_lin_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_vel_b` instead.""" warnings.warn( - "The `object_lin_vel_b` property will be deprecated in a future release. Please use" + "The `object_lin_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -931,7 +929,7 @@ def object_lin_vel_b(self) -> torch.Tensor: def object_ang_vel_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_ang_vel_b` instead.""" warnings.warn( - "The `object_ang_vel_b` property will be deprecated in a future release. Please use" + "The `object_ang_vel_b` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_vel_b` instead.", DeprecationWarning, stacklevel=2, @@ -942,7 +940,7 @@ def object_ang_vel_b(self) -> torch.Tensor: def object_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_acc_w` instead.""" warnings.warn( - "The `object_acc_w` property will be deprecated in a future release. Please use `body_com_acc_w` instead.", + "The `object_acc_w` property will be deprecated in IsaacLab 4.0. Please use `body_com_acc_w` instead.", DeprecationWarning, stacklevel=2, ) @@ -952,7 +950,7 @@ def object_acc_w(self) -> torch.Tensor: def object_lin_acc_w(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_lin_acc_w` instead.""" warnings.warn( - "The `object_lin_acc_w` property will be deprecated in a future release. Please use" + "The `object_lin_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_lin_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -963,7 +961,7 @@ def object_lin_acc_w(self) -> torch.Tensor: def object_ang_acc_w(self) -> torch.Tensor: """Deprecated property. 
Please use :attr:`body_com_ang_acc_w` instead.""" warnings.warn( - "The `object_ang_acc_w` property will be deprecated in a future release. Please use" + "The `object_ang_acc_w` property will be deprecated in IsaacLab 4.0. Please use" " `body_com_ang_acc_w` instead.", DeprecationWarning, stacklevel=2, @@ -974,7 +972,7 @@ def object_ang_acc_w(self) -> torch.Tensor: def com_pos_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_pos_b` instead.""" warnings.warn( - "The `com_pos_b` property will be deprecated in a future release. Please use `body_com_pos_b` instead.", + "The `com_pos_b` property will be deprecated in IsaacLab 4.0. Please use `body_com_pos_b` instead.", DeprecationWarning, stacklevel=2, ) @@ -984,7 +982,7 @@ def com_pos_b(self) -> torch.Tensor: def com_quat_b(self) -> torch.Tensor: """Deprecated property. Please use :attr:`body_com_quat_b` instead.""" warnings.warn( - "The `com_quat_b` property will be deprecated in a future release. Please use `body_com_quat_b` instead.", + "The `com_quat_b` property will be deprecated in IsaacLab 4.0. Please use `body_com_quat_b` instead.", DeprecationWarning, stacklevel=2, ) From a666933fd128f6875b10a89bce36125e6a6a3b1d Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 15:24:00 +0100 Subject: [PATCH 33/38] Added changelogs --- source/isaaclab/config/extension.toml | 2 +- source/isaaclab/docs/CHANGELOG.rst | 19 +++++++++++++++++++ source/isaaclab_physx/config/extension.toml | 2 +- source/isaaclab_physx/docs/CHANGELOG.rst | 19 +++++++++++++++++++ 4 files changed, 40 insertions(+), 2 deletions(-) diff --git a/source/isaaclab/config/extension.toml b/source/isaaclab/config/extension.toml index dab6c3b9a8a..a1787245c2b 100644 --- a/source/isaaclab/config/extension.toml +++ b/source/isaaclab/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.56.0" +version = "0.56.1" # Description title = "Isaac Lab framework for Robot Learning" diff --git a/source/isaaclab/docs/CHANGELOG.rst b/source/isaaclab/docs/CHANGELOG.rst index 3799a76e417..4ede3b47025 100644 --- a/source/isaaclab/docs/CHANGELOG.rst +++ b/source/isaaclab/docs/CHANGELOG.rst @@ -1,6 +1,25 @@ Changelog --------- +0.56.1 (2026-01-30) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab.test.mock_interfaces` module providing mock implementations for unit testing + without requiring Isaac Sim. Includes: + + * Mock assets: :class:`MockArticulation`, :class:`MockRigidObject`, :class:`MockRigidObjectCollection` + with full state tracking and property management. + * Mock sensors: :class:`MockContactSensor`, :class:`MockImu`, :class:`MockFrameTransformer` + with configurable data outputs. + * Utility classes: :class:`MockArticulationBuilder`, :class:`MockSensorBuilder`, + :class:`MockWrenchComposer` for flexible mock construction. + * Factory functions for common robot morphologies (quadruped, humanoid). + * Patching utilities and decorators for easy test injection. 
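[Editor's note] For orientation, the sketch below shows the kind of Isaac Sim-free unit test this changelog entry describes. Only the module path and class name (isaaclab.test.mock_interfaces, MockArticulation) come from the entry above; the constructor arguments and the data attributes accessed in the test are illustrative assumptions, not the actual API introduced by this patch series.

    # Hypothetical usage sketch -- constructor arguments and data attributes are assumed
    # for illustration; consult the mock_interfaces module itself for the real signatures.
    import torch

    from isaaclab.test.mock_interfaces import MockArticulation  # class name taken from the changelog


    def test_observation_shape_without_isaac_sim():
        # Build a mock articulation with 4 environments and 12 joints (assumed arguments).
        robot = MockArticulation(num_envs=4, num_joints=12, device="cpu")
        # The mock is expected to expose the same data buffers as the real asset, so
        # downstream code (observations, rewards, MDP terms) can run without the simulator.
        obs = torch.cat([robot.data.joint_pos, robot.data.joint_vel], dim=-1)
        assert obs.shape == (4, 24)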
+ + 0.56.0 (2026-01-30) ~~~~~~~~~~~~~~~~~~~ diff --git a/source/isaaclab_physx/config/extension.toml b/source/isaaclab_physx/config/extension.toml index cbe6cd87ac6..1a3e81dc8cd 100644 --- a/source/isaaclab_physx/config/extension.toml +++ b/source/isaaclab_physx/config/extension.toml @@ -1,7 +1,7 @@ [package] # Note: Semantic Versioning is used: https://semver.org/ -version = "0.1.1" +version = "0.1.2" # Description title = "PhysX simulation interfaces for IsaacLab core package" diff --git a/source/isaaclab_physx/docs/CHANGELOG.rst b/source/isaaclab_physx/docs/CHANGELOG.rst index d3d462d2f44..ade43b1d253 100644 --- a/source/isaaclab_physx/docs/CHANGELOG.rst +++ b/source/isaaclab_physx/docs/CHANGELOG.rst @@ -1,6 +1,25 @@ Changelog --------- +0.1.2 (2026-01-30) +~~~~~~~~~~~~~~~~~~~ + +Added +^^^^^ + +* Added :mod:`isaaclab_physx.test.mock_interfaces` module providing mock PhysX view implementations + for unit testing without requiring Isaac Sim. Includes: + + * :class:`MockRigidBodyView`: Mock for ``physx.RigidBodyView`` with transforms, velocities, + accelerations, and mass properties. + * :class:`MockArticulationView`: Mock for ``physx.ArticulationView`` with root/link states, + DOF properties, and joint control. + * :class:`MockRigidContactView`: Mock for ``physx.RigidContactView`` with contact forces, + positions, normals, and friction data. + * Factory functions including pre-configured quadruped and humanoid views. + * Patching utilities and decorators for easy test injection. + + 0.1.1 (2026-01-28) ~~~~~~~~~~~~~~~~~~~ From 82e1fb6ff23573a6c4923df329b83f2cb11f3fc7 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Fri, 30 Jan 2026 17:13:00 +0100 Subject: [PATCH 34/38] should work now. --- .../test_mock_articulation_view.py | 4 ++-- .../test_mock_interfaces/test_mock_rigid_body_view.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py index 4ed69a36513..32f0f7c03d8 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -182,7 +182,7 @@ def view(self): def test_get_masses_shape(self, view): """Test masses shape.""" masses = view.get_masses() - assert masses.shape == (4, 13, 1) + assert masses.shape == (4, 13) def test_get_coms_shape(self, view): """Test centers of mass shape.""" @@ -192,7 +192,7 @@ def test_get_coms_shape(self, view): def test_get_inertias_shape(self, view): """Test inertias shape.""" inertias = view.get_inertias() - assert inertias.shape == (4, 13, 3, 3) + assert inertias.shape == (4, 13, 9) class TestMockArticulationViewSetters: diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py index 4c2a210617a..3416a713d21 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -72,27 +72,27 @@ def test_get_accelerations_shape(self, view): def test_get_masses_shape(self, view): """Test masses shape.""" masses = view.get_masses() - assert masses.shape == (4, 1, 1) + assert masses.shape == (4, 1) def test_get_masses_default_value(self, view): """Test that default mass is 1.""" masses = view.get_masses() - assert torch.allclose(masses, 
torch.ones(4, 1, 1)) + assert torch.allclose(masses, torch.ones(4, 1)) def test_get_coms_shape(self, view): """Test centers of mass shape.""" coms = view.get_coms() - assert coms.shape == (4, 1, 7) + assert coms.shape == (4, 7) def test_get_inertias_shape(self, view): """Test inertias shape.""" inertias = view.get_inertias() - assert inertias.shape == (4, 1, 3, 3) + assert inertias.shape == (4, 9) def test_get_inertias_default_value(self, view): """Test that default inertia is identity.""" inertias = view.get_inertias() - expected = torch.eye(3).unsqueeze(0).unsqueeze(0).expand(4, 1, 3, 3) + expected = torch.eye(3).repeat(4, 1).reshape(4, 9) assert torch.allclose(inertias, expected) def test_getters_return_clones(self, view): From f75b5cb152fb93d52efd5a78f6fa02ef9cf5f3b8 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 3 Feb 2026 10:11:47 +0100 Subject: [PATCH 35/38] Missed a quaternion conversion... --- .../isaaclab_physx/assets/articulation/articulation.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index 3167d41b598..d0259ad3878 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -425,9 +425,7 @@ def write_root_link_pose_to_sim(self, root_pose: torch.Tensor, env_ids: Sequence if self.data._root_state_w.data is not None: self.data.root_state_w[env_ids, :7] = self.data.root_link_pose_w[env_ids] - # convert root quaternion from wxyz to xyzw root_poses_xyzw = self.data.root_link_pose_w.clone() - root_poses_xyzw[:, 3:] = math_utils.convert_quat(root_poses_xyzw[:, 3:], to="xyzw") # Need to invalidate the buffer to trigger the update with the new state. 
self.data._body_link_pose_w.timestamp = -1.0 From 48d6eea0d6734b27c78aa50b72b14505ddfcb733 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 3 Feb 2026 10:14:16 +0100 Subject: [PATCH 36/38] pre-commits --- .../isaaclab_physx/assets/articulation/articulation.py | 1 + .../isaaclab_physx/assets/articulation/articulation_data.py | 3 +-- .../isaaclab_physx/assets/rigid_object/rigid_object_data.py | 1 - .../rigid_object_collection/rigid_object_collection_data.py | 2 +- 4 files changed, 3 insertions(+), 4 deletions(-) diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py index d0259ad3878..e31f18dccdf 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation.py @@ -32,6 +32,7 @@ if TYPE_CHECKING: import omni.physics.tensors.impl.api as physx + from isaaclab.assets.articulation.articulation_cfg import ArticulationCfg # import logger diff --git a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py index fe37a531d96..01300e25f7c 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/articulation/articulation_data.py @@ -11,12 +11,11 @@ import torch -import omni.physics.tensors.impl.api as physx from isaacsim.core.simulation_manager import SimulationManager from isaaclab.assets.articulation.base_articulation_data import BaseArticulationData from isaaclab.utils.buffers import TimestampedBuffer -from isaaclab.utils.math import combine_frame_transforms, convert_quat, normalize, quat_apply, quat_apply_inverse +from isaaclab.utils.math import combine_frame_transforms, normalize, quat_apply, quat_apply_inverse if TYPE_CHECKING: from isaaclab.assets.articulation.articulation_view import ArticulationView diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py index a7595dac778..cc3e636aa04 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object/rigid_object_data.py @@ -17,7 +17,6 @@ from isaaclab.utils.buffers import TimestampedBuffer from isaaclab.utils.math import ( combine_frame_transforms, - convert_quat, normalize, quat_apply, quat_apply_inverse, diff --git a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py index 9c92581b721..64acd87ceb1 100644 --- a/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py +++ b/source/isaaclab_physx/isaaclab_physx/assets/rigid_object_collection/rigid_object_collection_data.py @@ -15,7 +15,7 @@ from isaaclab.assets.rigid_object_collection.base_rigid_object_collection_data import BaseRigidObjectCollectionData from isaaclab.utils.buffers import TimestampedBuffer -from isaaclab.utils.math import combine_frame_transforms, convert_quat, normalize, quat_apply, quat_apply_inverse +from isaaclab.utils.math import combine_frame_transforms, normalize, quat_apply, quat_apply_inverse if TYPE_CHECKING: from isaaclab.assets.rigid_object_collection.rigid_object_collection_view 
import RigidObjectCollectionView From 8cf180995233ecf8c54dc809063a6d96b39a96be Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 3 Feb 2026 10:26:10 +0100 Subject: [PATCH 37/38] Addressing the lizard's comments. --- source/isaaclab/isaaclab/devices/openxr/xr_anchor_utils.py | 2 -- source/isaaclab/isaaclab/test/mock_interfaces/__init__.py | 5 ----- .../isaaclab/test/mock_interfaces/assets/__init__.py | 5 ----- .../isaaclab/test/mock_interfaces/assets/factories.py | 5 ----- .../test/mock_interfaces/assets/mock_articulation.py | 5 ----- .../test/mock_interfaces/assets/mock_rigid_object.py | 5 ----- .../mock_interfaces/assets/mock_rigid_object_collection.py | 5 ----- .../isaaclab/test/mock_interfaces/sensors/__init__.py | 5 ----- .../isaaclab/test/mock_interfaces/sensors/factories.py | 5 ----- .../test/mock_interfaces/sensors/mock_contact_sensor.py | 5 ----- .../test/mock_interfaces/sensors/mock_frame_transformer.py | 5 ----- .../isaaclab/test/mock_interfaces/sensors/mock_imu.py | 5 ----- .../isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py | 5 ----- .../isaaclab/test/mock_interfaces/utils/mock_generator.py | 5 ----- .../isaaclab/isaaclab/test/mock_interfaces/utils/patching.py | 5 ----- .../isaaclab/test/test_mock_interfaces/test_mock_assets.py | 5 ----- .../test/test_mock_interfaces/test_mock_data_properties.py | 5 ----- .../isaaclab/test/test_mock_interfaces/test_mock_sensors.py | 5 ----- source/isaaclab_physx/isaaclab_physx/test/__init__.py | 5 ----- .../isaaclab_physx/test/mock_interfaces/__init__.py | 5 ----- .../isaaclab_physx/test/mock_interfaces/factories.py | 5 ----- .../isaaclab_physx/test/mock_interfaces/utils/__init__.py | 5 ----- .../test/mock_interfaces/utils/mock_shared_metatype.py | 5 ----- .../isaaclab_physx/test/mock_interfaces/utils/patching.py | 5 ----- .../isaaclab_physx/test/mock_interfaces/views/__init__.py | 5 ----- .../test/mock_interfaces/views/mock_articulation_view.py | 5 ----- .../test/mock_interfaces/views/mock_rigid_body_view.py | 5 ----- .../test/mock_interfaces/views/mock_rigid_contact_view.py | 5 ----- source/isaaclab_physx/test/test_mock_interfaces/__init__.py | 5 ----- .../test/test_mock_interfaces/test_mock_articulation_view.py | 5 ----- .../test/test_mock_interfaces/test_mock_rigid_body_view.py | 5 ----- .../test_mock_interfaces/test_mock_rigid_contact_view.py | 5 ----- .../test/test_mock_interfaces/test_patching.py | 5 ----- 33 files changed, 162 deletions(-) diff --git a/source/isaaclab/isaaclab/devices/openxr/xr_anchor_utils.py b/source/isaaclab/isaaclab/devices/openxr/xr_anchor_utils.py index c525b99e66b..6bc9fcd6ec2 100644 --- a/source/isaaclab/isaaclab/devices/openxr/xr_anchor_utils.py +++ b/source/isaaclab/isaaclab/devices/openxr/xr_anchor_utils.py @@ -3,8 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# SPDX-License-Identifier: BSD-3-Clause """Utilities for synchronizing XR anchor pose with a reference prim and XR config.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py index 271765d769d..8c3fe7595a1 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Mock interfaces for IsaacLab sensors and assets. This module provides mock implementations of sensor and asset classes for unit testing diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py index 2dce1f8450a..f6169a1fcc7 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock asset interfaces for testing without Isaac Sim.""" from .mock_articulation import MockArticulation, MockArticulationData diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py index 697a8706eba..c135c80005f 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/factories.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Factory functions for creating pre-configured mock assets.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py index fc9a8619698..7004da6798b 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_articulation.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock articulation asset for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py index ec6d6ea45c3..8e9354b8814 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock rigid object asset for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py index 5ccac7951f1..342bef51c99 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/assets/mock_rigid_object_collection.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Mock rigid object collection asset for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py index f99b9347f2d..8ea858817fc 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock sensor interfaces for testing without Isaac Sim.""" from .mock_contact_sensor import MockContactSensor, MockContactSensorData diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py index 8268928af3a..8f99a4854ad 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/factories.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Factory functions for creating pre-configured mock sensors.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py index 5cc51d8afde..140d5cc5d8b 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_contact_sensor.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock contact sensor for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py index 7b1b9081b06..55fce15edc9 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_frame_transformer.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock frame transformer sensor for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py index 202465211ca..12883cfa789 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/sensors/mock_imu.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Mock IMU sensor for testing without Isaac Sim.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py index 474483b6098..5935e60c1d8 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Utilities for creating and using mock interfaces.""" from .mock_generator import MockArticulationBuilder, MockSensorBuilder diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py index f9bc6fb246d..fb139fe1e1b 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/mock_generator.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Utilities for generating custom mock objects.""" from __future__ import annotations diff --git a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py index 4dcb5038bb0..15941ce5122 100644 --- a/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py +++ b/source/isaaclab/isaaclab/test/mock_interfaces/utils/patching.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Utilities for patching real classes with mock implementations in tests.""" from __future__ import annotations diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py index 7eac32396f4..37e5b05914b 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_assets.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Unit tests for mock asset interfaces.""" import pytest diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py index 628a42c2d2a..37e09e2f382 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_data_properties.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Comprehensive tests for mock data properties - shapes, setters, and device handling.""" import pytest diff --git a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py index 88a3b4db6f3..42dbfb3c635 100644 --- a/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py +++ b/source/isaaclab/test/test_mock_interfaces/test_mock_sensors.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Unit tests for mock sensor interfaces.""" import pytest diff --git a/source/isaaclab_physx/isaaclab_physx/test/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/__init__.py index a8484a8989a..7d7ed09bafb 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/__init__.py @@ -3,9 +3,4 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Test utilities for isaaclab_physx.""" diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py index d743be4fdf5..8e619ad9227 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock interfaces for PhysX TensorAPI views. This module provides mock implementations of PhysX TensorAPI views for unit testing diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py index e99604d4175..a3d5ae812c9 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/factories.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Factory functions for creating mock PhysX views.""" from __future__ import annotations diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py index 9abf21654b1..d945acdf8b1 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Utilities for mock PhysX interfaces.""" from .mock_shared_metatype import MockSharedMetatype diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py index d9858c11744..c7ceb4d93f7 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/mock_shared_metatype.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock implementation of PhysX shared metatype.""" from __future__ import annotations diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py index 3d787e74b2a..288eb28bf5f 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/utils/patching.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Utilities for patching PhysX views with mock implementations.""" from __future__ import annotations diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py index ae4fcc892b3..f4699c04592 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/__init__.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock PhysX TensorAPI views.""" from .mock_articulation_view import MockArticulationView diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py index 604cb4966c2..52827ac3c53 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_articulation_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock implementation of PhysX ArticulationView.""" from __future__ import annotations diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py index e34e2df8cf5..a7698cd7af5 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_body_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Mock implementation of PhysX RigidBodyView.""" from __future__ import annotations diff --git a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py index 0235700b3e9..9bb2821a6ba 100644 --- a/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py +++ b/source/isaaclab_physx/isaaclab_physx/test/mock_interfaces/views/mock_rigid_contact_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Mock implementation of PhysX RigidContactView.""" from __future__ import annotations diff --git a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py index f8b9c5fcd4e..c360d6bd70c 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/__init__.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/__init__.py @@ -3,9 +3,4 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Tests for mock PhysX interfaces.""" diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py index 32f0f7c03d8..76f25f9c23a 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_articulation_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Tests for MockArticulationView.""" import pytest diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py index 3416a713d21..014423ee481 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_body_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Tests for MockRigidBodyView.""" import pytest diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py index 8aa4d5e6ce7..4c2503531da 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_mock_rigid_contact_view.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - """Tests for MockRigidContactView.""" import pytest diff --git a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py index b475c1ae8cc..7c49afdef34 100644 --- a/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py +++ b/source/isaaclab_physx/test/test_mock_interfaces/test_patching.py @@ -3,11 +3,6 @@ # # SPDX-License-Identifier: BSD-3-Clause -# Copyright (c) 2022-2025, The Isaac Lab Project Developers. -# All rights reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - """Tests for patching utilities.""" from isaaclab_physx.test.mock_interfaces.utils import ( From e411ceb6812710d4b3ad813bfe2de37fbd20df13 Mon Sep 17 00:00:00 2001 From: Antoine Richard Date: Tue, 3 Feb 2026 11:45:23 +0100 Subject: [PATCH 38/38] fix doc failures --- docs/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/index.rst b/docs/index.rst index 56177298e60..96f593f1d98 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -99,6 +99,7 @@ Table of Contents source/tutorials/index source/how-to/index source/overview/developer-guide/index + source/testing/index .. toctree::