diff --git a/source/isaaclab/isaaclab/assets/articulation/articulation_cfg.py b/source/isaaclab/isaaclab/assets/articulation/articulation_cfg.py
index 208590dfc21..d784493f407 100644
--- a/source/isaaclab/isaaclab/assets/articulation/articulation_cfg.py
+++ b/source/isaaclab/isaaclab/assets/articulation/articulation_cfg.py
@@ -57,5 +57,17 @@ class InitialStateCfg(AssetBaseCfg.InitialStateCfg):
     The soft joint position limits are accessible through the :attr:`ArticulationData.soft_joint_pos_limits` attribute.
     """
 
+    gravity_compensation: list[str] | None = None
+    """List of body name patterns for gravity compensation.
+
+    Each string is a regular expression pattern matching body names that should have
+    gravity compensation applied (full compensation, i.e., no gravity effect).
+    Defaults to None (no compensation).
+    """
+
+    gravity_compensation_root_link_index: str | None = None
+    """Root link used as the reference for gravity compensation, specified by name. Defaults to None (no compensation).
+    """
+
     actuators: dict[str, ActuatorBaseCfg] = MISSING
     """Actuators for the robot with corresponding joint names."""
diff --git a/source/isaaclab/isaaclab/cloner/cloner_utils.py b/source/isaaclab/isaaclab/cloner/cloner_utils.py
index 6b15379d2d9..9400ffc2de7 100644
--- a/source/isaaclab/isaaclab/cloner/cloner_utils.py
+++ b/source/isaaclab/isaaclab/cloner/cloner_utils.py
@@ -67,7 +67,9 @@ def clone_from_template(stage: Usd.Stage, num_clones: int, template_clone_cfg: T
     # If all prototypes map to env_0, clone whole env_0 to all envs; else clone per-object
     if torch.all(proto_idx == 0):
         # args: src_paths, dest_paths, env_ids, mask
-        replicate_args = [clone_path_fmt.format(0)], [clone_path_fmt], world_indices, clone_masking
+        # When cloning the entire env_0, there is a single source, so a single-row mask is needed
+        single_source_mask = torch.ones((1, num_clones), device=cfg.device, dtype=torch.bool)
+        replicate_args = [clone_path_fmt.format(0)], [clone_path_fmt], world_indices, single_source_mask
         get_pos = lambda path: stage.GetPrimAtPath(path).GetAttribute("xformOp:translate").Get()  # noqa: E731
         positions = torch.tensor([get_pos(clone_path_fmt.format(i)) for i in world_indices])
         if cfg.clone_physics:
diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/__init__.py b/source/isaaclab/isaaclab/controllers/pink_ik/__init__.py
new file mode 100644
index 00000000000..17ed7a67b07
--- /dev/null
+++ b/source/isaaclab/isaaclab/controllers/pink_ik/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Pink IK controller package for IsaacLab.
+
+This package provides integration between the Pink inverse kinematics solver and IsaacLab.
+"""
+
+from .null_space_posture_task import NullSpacePostureTask
+from .pink_ik import PinkIKController
+from .pink_ik_cfg import PinkIKControllerCfg
diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py b/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py
new file mode 100644
index 00000000000..4690677c24e
--- /dev/null
+++ b/source/isaaclab/isaaclab/controllers/pink_ik/local_frame_task.py
@@ -0,0 +1,116 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import numpy as np
+from collections.abc import Sequence
+
+import pinocchio as pin
+from pink.tasks.frame_task import FrameTask
+
+from .pink_kinematics_configuration import PinkKinematicsConfiguration
+
+
+class LocalFrameTask(FrameTask):
+    """
+    A task that computes the pose error in a local (custom) reference frame.
+    Inherits from FrameTask but overrides target handling, error, and Jacobian computation.
+    """
+
+    def __init__(
+        self,
+        frame: str,
+        base_link_frame_name: str,
+        position_cost: float | Sequence[float],
+        orientation_cost: float | Sequence[float],
+        lm_damping: float = 0.0,
+        gain: float = 1.0,
+    ):
+        """
+        Initialize the LocalFrameTask with configuration.
+
+        This task computes pose errors in a local (custom) frame rather than the world frame,
+        allowing for more flexible control strategies where the reference frame can be
+        specified independently.
+
+        Args:
+            frame: Name of the frame to control (end-effector or target frame).
+            base_link_frame_name: Name of the base link frame used as reference frame
+                for computing transforms and errors.
+            position_cost: Cost weight(s) for position error. Can be a single float
+                for uniform weighting or a sequence of 3 floats for per-axis weighting.
+            orientation_cost: Cost weight(s) for orientation error. Can be a single float
+                for uniform weighting or a sequence of 3 floats for per-axis weighting.
+            lm_damping: Levenberg-Marquardt damping factor for numerical stability.
+                Defaults to 0.0 (no damping).
+            gain: Task gain factor that scales the overall task contribution.
+                Defaults to 1.0.
+        """
+        super().__init__(frame, position_cost, orientation_cost, lm_damping, gain)
+        self.base_link_frame_name = base_link_frame_name
+        self.transform_target_to_base = None
+
+    def set_target(self, transform_target_to_base: pin.SE3) -> None:
+        """Set task target pose in the base link frame.
+
+        Args:
+            transform_target_to_base: Transform from the task target frame to
+                the base link frame.
+        """
+        self.transform_target_to_base = transform_target_to_base.copy()
+
+    def set_target_from_configuration(self, configuration: PinkKinematicsConfiguration) -> None:
+        """Set task target pose from a robot configuration.
+
+        Args:
+            configuration: Robot configuration.
+        """
+        if not isinstance(configuration, PinkKinematicsConfiguration):
+            raise ValueError("configuration must be a PinkKinematicsConfiguration")
+        self.set_target(configuration.get_transform(self.frame, self.base_link_frame_name))
+
+    def compute_error(self, configuration: PinkKinematicsConfiguration) -> np.ndarray:
+        """
+        Compute the error between the current and target pose in the local frame.
+        """
+        if not isinstance(configuration, PinkKinematicsConfiguration):
+            raise ValueError("configuration must be a PinkKinematicsConfiguration")
+        if self.transform_target_to_base is None:
+            raise ValueError(f"no target set for frame '{self.frame}'")
+
+        transform_frame_to_base = configuration.get_transform(self.frame, self.base_link_frame_name)
+        transform_target_to_frame = transform_frame_to_base.actInv(self.transform_target_to_base)
+
+        error_in_frame: np.ndarray = pin.log(transform_target_to_frame).vector
+        return error_in_frame
+
+    def compute_jacobian(self, configuration: PinkKinematicsConfiguration) -> np.ndarray:
+        r"""Compute the frame task Jacobian.
+
+        The task Jacobian :math:`J(q) \in \mathbb{R}^{6 \times n_v}` is the
+        derivative of the task error :math:`e(q) \in \mathbb{R}^6` with respect
+        to the configuration :math:`q`. The formula for the frame task is:
+
+        .. math::
+
+            J(q) = -\text{Jlog}_6(T_{tb}) {}_b J_{0b}(q)
+
+        The derivation of the formula for this Jacobian is detailed in
+        [Caron2023]_. See also
+        :func:`pink.tasks.task.Task.compute_jacobian` for more context on task
+        Jacobians.
+
+        Args:
+            configuration: Robot configuration :math:`q`.
+
+        Returns:
+            Jacobian matrix :math:`J`, expressed locally in the frame.
+        """
+        if self.transform_target_to_base is None:
+            raise ValueError(f"no target set for frame '{self.frame}'")
+        transform_frame_to_base = configuration.get_transform(self.frame, self.base_link_frame_name)
+        transform_frame_to_target = self.transform_target_to_base.actInv(transform_frame_to_base)
+        jacobian_in_frame = configuration.get_frame_jacobian(self.frame)
+        J = -pin.Jlog6(transform_frame_to_target) @ jacobian_in_frame
+        return J
diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py b/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py
new file mode 100644
index 00000000000..73f285b6140
--- /dev/null
+++ b/source/isaaclab/isaaclab/controllers/pink_ik/null_space_posture_task.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import numpy as np
+import scipy.linalg.blas as blas
+import scipy.linalg.lapack as lapack
+
+import pinocchio as pin
+from pink.configuration import Configuration
+from pink.tasks import Task
+
+
+class NullSpacePostureTask(Task):
+    r"""Pink-based task that adds a posture objective projected into the null space of the other tasks.
+
+    This task implements posture control in the null space of higher-priority tasks
+    (typically end-effector pose tasks) within the Pink inverse kinematics framework.
+
+    **Mathematical Formulation:**
+
+    For details on the Pink inverse kinematics optimization formulation, visit: https://github.com/stephane-caron/pink
+
+    **Null Space Posture Task Implementation:**
+
+    This task consists of two components:
+
+    1. **Error Function**: The posture error is computed as:
+
+       .. math::
+
+          \mathbf{e}(\mathbf{q}) = \mathbf{M} \cdot (\mathbf{q}^* - \mathbf{q})
+
+       where:
+
+       - :math:`\mathbf{q}^*` is the target joint configuration
+       - :math:`\mathbf{q}` is the current joint configuration
+       - :math:`\mathbf{M}` is a joint selection mask matrix
+
+    2. **Jacobian Matrix**: The task Jacobian is the null space projector:
+
+       .. math::
+
+          \mathbf{J}_{\text{posture}}(\mathbf{q}) = \mathbf{N}(\mathbf{q}) = \mathbf{I} - \mathbf{J}_{\text{primary}}^+ \mathbf{J}_{\text{primary}}
+
+       where:
+
+       - :math:`\mathbf{J}_{\text{primary}}` is the combined Jacobian of all higher-priority tasks
+       - :math:`\mathbf{J}_{\text{primary}}^+` is the pseudoinverse of the primary task Jacobian
+       - :math:`\mathbf{N}(\mathbf{q})` is the null space projector matrix
+
+    For example, if there are two frame tasks (e.g., controlling the pose of two end-effectors), the combined Jacobian
+    :math:`\mathbf{J}_{\text{primary}}` is constructed by stacking the individual Jacobians for each frame vertically:
+
+    .. math::
+
+       \mathbf{J}_{\text{primary}} =
+       \begin{bmatrix}
+       \mathbf{J}_1(\mathbf{q}) \\
+       \mathbf{J}_2(\mathbf{q})
+       \end{bmatrix}
+
+    where :math:`\mathbf{J}_1(\mathbf{q})` and :math:`\mathbf{J}_2(\mathbf{q})` are the Jacobians for the first and second frame tasks, respectively.
+ + The null space projector ensures that joint velocities in the null space produce zero velocity + for the primary tasks: :math:`\mathbf{J}_{\text{primary}} \cdot \dot{\mathbf{q}}_{\text{null}} = \mathbf{0}`. + + **Task Integration:** + + When integrated into the Pink framework, this task contributes to the optimization as: + + .. math:: + + \left\| \mathbf{N}(\mathbf{q}) \mathbf{v} + \mathbf{M} \cdot (\mathbf{q}^* - \mathbf{q}) \right\|_{W_{\text{posture}}}^2 + + This formulation allows the robot to maintain a desired posture while respecting the constraints + imposed by higher priority tasks (e.g., end-effector positioning). + + """ + + # Regularization factor for pseudoinverse computation to ensure numerical stability + PSEUDOINVERSE_DAMPING_FACTOR: float = 1e-9 + + def __init__( + self, + cost: float, + lm_damping: float = 0.0, + gain: float = 1.0, + controlled_frames: list[str] | None = None, + controlled_joints: list[str] | None = None, + ) -> None: + r"""Initialize the null space posture task. + + This task maintains a desired joint posture in the null space of higher-priority + frame tasks. Joint selection allows excluding specific joints (e.g., wrist joints + in humanoid manipulation) to prevent large rotational ranges from overwhelming + errors in critical joints like shoulders and waist. + + Args: + cost: Task weighting factor in the optimization objective. + Units: :math:`[\text{cost}] / [\text{rad}]`. + lm_damping: Levenberg-Marquardt regularization scale (unitless). Defaults to 0.0. + gain: Task gain :math:`\alpha \in [0, 1]` for low-pass filtering. + Defaults to 1.0 (no filtering). + controlled_frames: Frame names whose Jacobians define the primary tasks for + null space projection. If None or empty, no projection is applied. + controlled_joints: Joint names to control in the posture task. If None or + empty, all actuated joints are controlled. + """ + super().__init__(cost=cost, gain=gain, lm_damping=lm_damping) + self.target_q: np.ndarray | None = None + self.controlled_frames: list[str] = controlled_frames or [] + self.controlled_joints: list[str] = controlled_joints or [] + self._joint_mask: np.ndarray | None = None + self._frame_names: list[str] | None = None + + def __repr__(self) -> str: + """Human-readable representation of the task.""" + return ( + f"NullSpacePostureTask(cost={self.cost}, gain={self.gain}, lm_damping={self.lm_damping}," + f" controlled_frames={self.controlled_frames}, controlled_joints={self.controlled_joints})" + ) + + def _build_joint_mapping(self, configuration: Configuration) -> None: + """Build joint mask and cache frequently used values. + + Creates a binary mask that selects which joints should be controlled + in the posture task. + + Args: + configuration: Robot configuration containing the model and joint information. + """ + # Create joint mask for full configuration size + self._joint_mask = np.zeros(configuration.model.nq) + + # Create dictionary for joint names to indices (exclude root joint) + joint_names = configuration.model.names.tolist()[1:] + + # Build joint mask efficiently + for i, joint_name in enumerate(joint_names): + if joint_name in self.controlled_joints: + self._joint_mask[i] = 1.0 + + # Cache frame names for performance + self._frame_names = list(self.controlled_frames) + + def set_target(self, target_q: np.ndarray) -> None: + """Set target posture configuration. + + Args: + target_q: Target vector in the configuration space. 
If the model + has a floating base, then this vector should include + floating-base coordinates (although they have no effect on the + posture task since only actuated joints are controlled). + """ + self.target_q = target_q.copy() + + def set_target_from_configuration(self, configuration: Configuration) -> None: + """Set target posture from a robot configuration. + + Args: + configuration: Robot configuration whose joint angles will be used + as the target posture. + """ + self.set_target(configuration.q) + + def compute_error(self, configuration: Configuration) -> np.ndarray: + r"""Compute posture task error. + + The error computation follows: + + .. math:: + + \mathbf{e}(\mathbf{q}) = \mathbf{M} \cdot (\mathbf{q}^* - \mathbf{q}) + + where :math:`\mathbf{M}` is the joint selection mask and :math:`\mathbf{q}^* - \mathbf{q}` + is computed using Pinocchio's difference function to handle joint angle wrapping. + + Args: + configuration: Robot configuration :math:`\mathbf{q}`. + + Returns: + Posture task error :math:`\mathbf{e}(\mathbf{q})` with the same dimension + as the configuration vector, but with zeros for non-controlled joints. + + Raises: + ValueError: If no posture target has been set. + """ + if self.target_q is None: + raise ValueError("No posture target has been set. Call set_target() first.") + + # Initialize joint mapping if needed + if self._joint_mask is None: + self._build_joint_mapping(configuration) + + # Compute configuration difference using Pinocchio's difference function + # This handles joint angle wrapping correctly + err = pin.difference( + configuration.model, + self.target_q, + configuration.q, + ) + + # Apply pre-computed joint mask to select only controlled joints + return self._joint_mask * err + + def compute_jacobian(self, configuration: Configuration) -> np.ndarray: + r"""Compute the null space projector Jacobian. + + The null space projector is defined as: + + .. math:: + + \mathbf{N}(\mathbf{q}) = \mathbf{I} - \mathbf{J}_{\text{primary}}^+ \mathbf{J}_{\text{primary}} + + where: + - :math:`\mathbf{J}_{\text{primary}}` is the combined Jacobian of all controlled frames + - :math:`\mathbf{J}_{\text{primary}}^+` is the pseudoinverse of the primary task Jacobian + - :math:`\mathbf{I}` is the identity matrix + + The null space projector ensures that joint velocities in the null space produce + zero velocity for the primary tasks: :math:`\mathbf{J}_{\text{primary}} \cdot \dot{\mathbf{q}}_{\text{null}} = \mathbf{0}`. + + If no controlled frames are specified, returns the identity matrix. + + Args: + configuration: Robot configuration :math:`\mathbf{q}`. + + Returns: + Null space projector matrix :math:`\mathbf{N}(\mathbf{q})` with dimensions + :math:`n_q \times n_q` where :math:`n_q` is the number of configuration variables. 
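+
+        A minimal NumPy sketch of the damped projector computed here (the
+        dimensions and damping value are illustrative):
+
+        .. code-block:: python
+
+            import numpy as np
+
+            J = np.random.rand(12, 30)  # two stacked 6-DoF frame Jacobians
+            damping = 1e-9
+            JJT = J @ J.T + damping**2 * np.eye(J.shape[0])
+            N = np.eye(J.shape[1]) - J.T @ np.linalg.solve(JJT, J)
+            # null-space motions produce (numerically) zero task-space velocity
+            assert np.allclose(J @ N, 0.0, atol=1e-6)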
+ """ + # Initialize joint mapping if needed + if self._frame_names is None: + self._build_joint_mapping(configuration) + + # If no frame tasks are defined, return identity matrix (no null space projection) + if not self._frame_names: + return np.eye(configuration.model.nq) + + # Get Jacobians for all frame tasks and combine them + J_frame_tasks = [configuration.get_frame_jacobian(frame_name) for frame_name in self._frame_names] + J_combined = np.concatenate(J_frame_tasks, axis=0) + + # Compute null space projector: N = I - J^+ * J + # Use fast pseudoinverse computation with direct LAPACK/BLAS calls + m, n = J_combined.shape + + # Wide matrix (typical for robotics): use left pseudoinverse + # J^+ = J^T @ inv(J @ J^T + λ²I) + # This is faster because we invert an m×m matrix instead of n×n + + # Compute J @ J^T using BLAS (faster than numpy) + JJT = blas.dgemm(1.0, J_combined, J_combined.T) + np.fill_diagonal(JJT, JJT.diagonal() + self.PSEUDOINVERSE_DAMPING_FACTOR**2) + + # Use LAPACK's Cholesky factorization (dpotrf = Positive definite TRiangular Factorization) + L, info = lapack.dpotrf(JJT, lower=1, clean=False, overwrite_a=True) + + if info != 0: + # Fallback if not positive definite: use numpy's pseudoinverse + J_pinv = np.linalg.pinv(J_combined) + return np.eye(n) - J_pinv @ J_combined + + # Solve (J @ J^T + λ²I) @ X = J using LAPACK's triangular solver (dpotrs) + # This directly solves the system without computing the full inverse + X, _ = lapack.dpotrs(L, J_combined, lower=1) + + # Compute null space projector: N = I - J^T @ X + N_combined = np.eye(n) - J_combined.T @ X + + return N_combined diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py new file mode 100644 index 00000000000..3dab8a63fc8 --- /dev/null +++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik.py @@ -0,0 +1,254 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Pink IK controller implementation for IsaacLab. + +This module provides integration between Pink inverse kinematics solver and IsaacLab. +Pink is a differentiable inverse kinematics solver framework that provides task-space control capabilities. + +Reference: + Pink IK Solver: https://github.com/stephane-caron/pink +""" + +from __future__ import annotations + +import numpy as np +import torch +from typing import TYPE_CHECKING + +from pink import solve_ik + +from isaaclab.assets import ArticulationCfg +from isaaclab.utils.string import resolve_matching_names_values + +from .null_space_posture_task import NullSpacePostureTask +from .pink_kinematics_configuration import PinkKinematicsConfiguration + +if TYPE_CHECKING: + from .pink_ik_cfg import PinkIKControllerCfg + + +class PinkIKController: + """Integration of Pink IK controller with Isaac Lab. + + The Pink IK controller solves differential inverse kinematics through weighted tasks. Each task is defined + by a residual function e(q) that is driven to zero (e.g., e(q) = p_target - p_ee(q) for end-effector positioning). + The controller computes joint velocities v satisfying J_e(q)v = -αe(q), where J_e(q) is the task Jacobian. + Multiple tasks are resolved through weighted optimization, formulating a quadratic program that minimizes + weighted task errors while respecting joint velocity limits. 
+ + It supports user defined tasks, and we have provided a NullSpacePostureTask for maintaining desired joint configurations. + + Reference: + Pink IK Solver: https://github.com/stephane-caron/pink + """ + + def __init__( + self, cfg: PinkIKControllerCfg, robot_cfg: ArticulationCfg, device: str, controlled_joint_indices: list[int] + ): + """Initialize the Pink IK Controller. + + Args: + cfg: The configuration for the Pink IK controller containing task definitions, solver parameters, + and joint configurations. + robot_cfg: The robot articulation configuration containing initial joint positions and robot + specifications. + device: The device to use for computations (e.g., 'cuda:0', 'cpu'). + controlled_joint_indices: A list of joint indices in the USD asset controlled by the Pink IK controller. + + Raises: + ValueError: When joint_names or all_joint_names are not provided in the configuration. + """ + if cfg.joint_names is None: + raise ValueError("joint_names must be provided in the configuration") + if cfg.all_joint_names is None: + raise ValueError("all_joint_names must be provided in the configuration") + + self.cfg = cfg + self.device = device + self.controlled_joint_indices = controlled_joint_indices + + # Validate consistency between controlled_joint_indices and configuration + self._validate_consistency(cfg, controlled_joint_indices) + + # Initialize the Kinematics model used by pink IK to control robot + self.pink_configuration = PinkKinematicsConfiguration( + urdf_path=cfg.urdf_path, + mesh_path=cfg.mesh_path, + controlled_joint_names=cfg.joint_names, + ) + + # Find the initial joint positions by matching Pink's joint names to robot_cfg.init_state.joint_pos, + # where the joint_pos keys may be regex patterns and the values are the initial positions. + # We want to assign to each Pink joint name the value from the first matching regex key in joint_pos. + pink_joint_names = self.pink_configuration.model.names.tolist()[1:] + joint_pos_dict = robot_cfg.init_state.joint_pos + + # Use resolve_matching_names_values to match Pink joint names to joint_pos values + indices, _, values = resolve_matching_names_values( + joint_pos_dict, pink_joint_names, preserve_order=False, match_all=False + ) + self.init_joint_positions = np.zeros(len(pink_joint_names)) + self.init_joint_positions[indices] = np.array(values) + + # Set the default targets for each task from the configuration + for task in cfg.variable_input_tasks: + # If task is a NullSpacePostureTask, set the target to the initial joint positions + if isinstance(task, NullSpacePostureTask): + task.set_target(self.init_joint_positions) + continue + task.set_target_from_configuration(self.pink_configuration) + for task in cfg.fixed_input_tasks: + task.set_target_from_configuration(self.pink_configuration) + + # Create joint ordering mappings + self._setup_joint_ordering_mappings() + + def _validate_consistency(self, cfg: PinkIKControllerCfg, controlled_joint_indices: list[int]) -> None: + """Validate consistency between controlled_joint_indices and controller configuration. + + Args: + cfg: The Pink IK controller configuration. + controlled_joint_indices: List of joint indices in Isaac Lab joint space. + + Raises: + ValueError: If any consistency checks fail. 
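+
+        Example of the invariant being checked (joint names are hypothetical):
+
+        .. code-block:: python
+
+            # cfg.all_joint_names (USD order): ["waist", "shoulder", "elbow", "wrist"]
+            # controlled_joint_indices:        [1, 2]
+            # cfg.joint_names must then equal: ["shoulder", "elbow"]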
+ """ + # Check: Length consistency + if cfg.joint_names is None: + raise ValueError("cfg.joint_names cannot be None") + if len(controlled_joint_indices) != len(cfg.joint_names): + raise ValueError( + f"Length mismatch: controlled_joint_indices has {len(controlled_joint_indices)} elements " + f"but cfg.joint_names has {len(cfg.joint_names)} elements" + ) + + # Check: Joint name consistency - verify that the indices point to the expected joint names + actual_joint_names = [cfg.all_joint_names[idx] for idx in controlled_joint_indices] + if actual_joint_names != cfg.joint_names: + mismatches = [] + for i, (actual, expected) in enumerate(zip(actual_joint_names, cfg.joint_names)): + if actual != expected: + mismatches.append( + f"Index {i}: index {controlled_joint_indices[i]} points to '{actual}' but expected '{expected}'" + ) + if mismatches: + raise ValueError( + "Joint name mismatch between controlled_joint_indices and cfg.joint_names:\n" + + "\n".join(mismatches) + ) + + def _setup_joint_ordering_mappings(self): + """Setup joint ordering mappings between Isaac Lab and Pink conventions.""" + pink_joint_names = self.pink_configuration.all_joint_names_pinocchio_order + isaac_lab_joint_names = self.cfg.all_joint_names + + if pink_joint_names is None: + raise ValueError("pink_joint_names should not be None") + if isaac_lab_joint_names is None: + raise ValueError("isaac_lab_joint_names should not be None") + + # Create reordering arrays for all joints + self.isaac_lab_to_pink_ordering = np.array( + [isaac_lab_joint_names.index(pink_joint) for pink_joint in pink_joint_names] + ) + self.pink_to_isaac_lab_ordering = np.array( + [pink_joint_names.index(isaac_lab_joint) for isaac_lab_joint in isaac_lab_joint_names] + ) + # Create reordering arrays for controlled joints only + pink_controlled_joint_names = self.pink_configuration.controlled_joint_names_pinocchio_order + isaac_lab_controlled_joint_names = self.cfg.joint_names + + if pink_controlled_joint_names is None: + raise ValueError("pink_controlled_joint_names should not be None") + if isaac_lab_controlled_joint_names is None: + raise ValueError("isaac_lab_controlled_joint_names should not be None") + + self.isaac_lab_to_pink_controlled_ordering = np.array( + [isaac_lab_controlled_joint_names.index(pink_joint) for pink_joint in pink_controlled_joint_names] + ) + self.pink_to_isaac_lab_controlled_ordering = np.array( + [pink_controlled_joint_names.index(isaac_lab_joint) for isaac_lab_joint in isaac_lab_controlled_joint_names] + ) + + def update_null_space_joint_targets(self, curr_joint_pos: np.ndarray): + """Update the null space joint targets. + + This method updates the target joint positions for null space posture tasks based on the current + joint configuration. This is useful for maintaining desired joint configurations when the primary + task allows redundancy. + + Args: + curr_joint_pos: The current joint positions of shape (num_joints,). + """ + for task in self.cfg.variable_input_tasks: + if isinstance(task, NullSpacePostureTask): + task.set_target(curr_joint_pos) + + def compute( + self, + curr_joint_pos: np.ndarray, + dt: float, + ) -> torch.Tensor: + """Compute the target joint positions based on current state and tasks. + + Performs inverse kinematics using the Pink solver to compute target joint positions that satisfy + the defined tasks. The solver uses quadratic programming to find optimal joint velocities that + minimize task errors while respecting constraints. 
+
+        Args:
+            curr_joint_pos: The current joint positions of shape (num_joints,).
+            dt: The time step for computing joint position changes in seconds.
+
+        Returns:
+            The target joint positions as a tensor of shape (num_joints,) on the specified device.
+            If the IK solver fails, returns the current joint positions unchanged to maintain stability.
+        """
+        # Get the current controlled joint positions
+        curr_controlled_joint_pos = [curr_joint_pos[i] for i in self.controlled_joint_indices]
+
+        # Initialize joint positions for Pink, converting from Isaac Lab to Pink/Pinocchio joint ordering.
+        joint_positions_pink = curr_joint_pos[self.isaac_lab_to_pink_ordering]
+
+        # Update Pink's robot configuration with the current joint positions
+        self.pink_configuration.update(joint_positions_pink)
+
+        # Solve IK using Pink's solver
+        try:
+            velocity = solve_ik(
+                self.pink_configuration,
+                self.cfg.variable_input_tasks + self.cfg.fixed_input_tasks,
+                dt,
+                solver="daqp",
+                safety_break=self.cfg.fail_on_joint_limit_violation,
+            )
+            joint_angle_changes = velocity * dt
+        except Exception as e:
+            # Print warning and return the current joint positions as the target
+            if self.cfg.show_ik_warnings:
+                print(
+                    "Warning: IK quadratic solver could not find a solution! Did not update the target joint"
+                    f" positions.\nError: {e}"
+                )
+
+            if self.cfg.xr_enabled:
+                from isaaclab.ui.xr_widgets import XRVisualization
+
+                XRVisualization.push_event("ik_error", {"error": e})
+            return torch.tensor(curr_controlled_joint_pos, device=self.device, dtype=torch.float32)
+
+        # Reorder the joint angle changes back to Isaac Lab conventions
+        joint_vel_isaac_lab = torch.tensor(
+            joint_angle_changes[self.pink_to_isaac_lab_controlled_ordering],
+            device=self.device,
+            dtype=torch.float32,
+        )
+
+        # Add the velocity changes to the current joint positions to get the target joint positions
+        target_joint_pos = torch.add(
+            joint_vel_isaac_lab, torch.tensor(curr_controlled_joint_pos, device=self.device, dtype=torch.float32)
+        )
+
+        return target_joint_pos
diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py
new file mode 100644
index 00000000000..69c65610354
--- /dev/null
+++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_ik_cfg.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Configuration for Pink IK controller."""
+
+from dataclasses import MISSING
+
+from pink.tasks import FrameTask
+
+from isaaclab.utils import configclass
+
+
+@configclass
+class PinkIKControllerCfg:
+    """Configuration settings for the Pink IK Controller.
+
+    The Pink IK controller can be found at: https://github.com/stephane-caron/pink
+    """
+
+    urdf_path: str | None = None
+    """Path to the robot's URDF file. This file is used by Pinocchio's `robot_wrapper.BuildFromURDF` to load the robot model."""
+
+    mesh_path: str | None = None
+    """Path to the mesh files associated with the robot. These files are also loaded by Pinocchio's `robot_wrapper.BuildFromURDF`."""
+
+    num_hand_joints: int = 0
+    """The number of hand joints in the robot. The action space for the controller has dimension pose_dim (7) * num_controlled_frames + num_hand_joints.
+ The last num_hand_joints values of the action are the hand joint angles.""" + + variable_input_tasks: list[FrameTask] = MISSING + """ + A list of tasks for the Pink IK controller. These tasks are controllable by the env action. + + These tasks can be used to control the pose of a frame or the angles of joints. + For more details, visit: https://github.com/stephane-caron/pink + """ + + fixed_input_tasks: list[FrameTask] = MISSING + """ + A list of tasks for the Pink IK controller. These tasks are fixed and not controllable by the env action. + + These tasks can be used to fix the pose of a frame or the angles of joints to a desired configuration. + For more details, visit: https://github.com/stephane-caron/pink + """ + + joint_names: list[str] | None = None + """A list of joint names in the USD asset controlled by the Pink IK controller. This is required because the joint naming conventions differ between USD and URDF files. + This value is currently designed to be automatically populated by the action term in a manager based environment.""" + + all_joint_names: list[str] | None = None + """A list of joint names in the USD asset. This is required because the joint naming conventions differ between USD and URDF files. + This value is currently designed to be automatically populated by the action term in a manager based environment.""" + + articulation_name: str = "robot" + """The name of the articulation USD asset in the scene.""" + + base_link_name: str = "base_link" + """The name of the base link in the USD asset.""" + + show_ik_warnings: bool = True + """Show warning if IK solver fails to find a solution.""" + + fail_on_joint_limit_violation: bool = True + """If True, the Pink IK solver will fail and raise an error if any joint limit is violated during optimization. PinkIKController + will handle the error by setting the last joint positions. If False, the solver will ignore joint limit violations and return the + closest solution found.""" + + xr_enabled: bool = False + """If True, the Pink IK controller will send information to the XRVisualization.""" diff --git a/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py b/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py new file mode 100644 index 00000000000..1062bb122e7 --- /dev/null +++ b/source/isaaclab/isaaclab/controllers/pink_ik/pink_kinematics_configuration.py @@ -0,0 +1,180 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import numpy as np + +import pinocchio as pin +from pink.configuration import Configuration +from pink.exceptions import FrameNotFound +from pinocchio.robot_wrapper import RobotWrapper + + +class PinkKinematicsConfiguration(Configuration): + """ + A configuration class that maintains both a "controlled" (reduced) model and a "full" model. + + This class extends the standard Pink Configuration to allow for selective joint control: + - The "controlled" model/data/q represent the subset of joints being actively controlled (e.g., a kinematic chain or arm). + - The "full" model/data/q represent the complete robot, including all joints. + + This is useful for scenarios where only a subset of joints are being optimized or controlled, but full-model kinematics + (e.g., for collision checking, full-body Jacobians, or visualization) are still required. 
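+
+    A minimal construction sketch (paths and joint names are hypothetical):
+
+    .. code-block:: python
+
+        config = PinkKinematicsConfiguration(
+            controlled_joint_names=["shoulder_pan", "shoulder_lift", "elbow"],
+            urdf_path="/path/to/robot.urdf",
+            mesh_path="/path/to/meshes",
+        )
+        # the reduced model exposed to Pink never has more DoFs than the full model
+        assert config.controlled_model.nq <= config.full_model.nq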
+
+    The class ensures that both models are kept up to date, and provides methods to update both the controlled and full
+    configurations as needed.
+    """
+
+    def __init__(
+        self,
+        controlled_joint_names: list[str],
+        urdf_path: str,
+        mesh_path: str | None = None,
+        copy_data: bool = True,
+        forward_kinematics: bool = True,
+    ):
+        """
+        Initialize PinkKinematicsConfiguration.
+
+        Args:
+            controlled_joint_names (list[str]): List of joint names to be actively controlled.
+            urdf_path (str): Path to the robot URDF file.
+            mesh_path (str, optional): Path to the mesh files for the robot.
+            copy_data (bool, optional): If True, work on an internal copy of the input data. Defaults to True.
+            forward_kinematics (bool, optional): If True, compute forward kinematics from the configuration vector. Defaults to True.
+
+        This constructor initializes the PinkKinematicsConfiguration, which maintains both a "controlled" (reduced) model and a "full" model.
+        The controlled model/data/q represent the subset of joints being actively controlled, while the full model/data/q represent the complete robot.
+        This is useful for scenarios where only a subset of joints are being optimized or controlled, but full-model kinematics are still required.
+        """
+        self._controlled_joint_names = controlled_joint_names
+
+        # Build robot model with all joints
+        if mesh_path:
+            self.robot_wrapper = RobotWrapper.BuildFromURDF(urdf_path, mesh_path)
+        else:
+            self.robot_wrapper = RobotWrapper.BuildFromURDF(urdf_path)
+        self.full_model = self.robot_wrapper.model
+        self.full_data = self.robot_wrapper.data
+        self.full_q = self.robot_wrapper.q0
+
+        self._all_joint_names = self.full_model.names.tolist()[1:]
+        # controlled_joint_indices: indices in all_joint_names for joints that are in controlled_joint_names, preserving all_joint_names order
+        self._controlled_joint_indices = [
+            idx for idx, joint_name in enumerate(self._all_joint_names) if joint_name in self._controlled_joint_names
+        ]
+
+        # Build the reduced model with only the controlled joints
+        joints_to_lock = []
+        for joint_name in self._all_joint_names:
+            if joint_name not in self._controlled_joint_names:
+                joints_to_lock.append(self.full_model.getJointId(joint_name))
+
+        if len(joints_to_lock) == 0:
+            # No joints to lock, controlled model is the same as full model
+            self.controlled_model = self.full_model
+            self.controlled_data = self.full_data
+            self.controlled_q = self.full_q
+        else:
+            self.controlled_model = pin.buildReducedModel(self.full_model, joints_to_lock, self.full_q)
+            self.controlled_data = self.controlled_model.createData()
+            self.controlled_q = self.full_q[self._controlled_joint_indices]
+
+        # Pink should only operate on the controlled (reduced) model
+        super().__init__(self.controlled_model, self.controlled_data, self.controlled_q, copy_data, forward_kinematics)
+
+    def update(self, q: np.ndarray | None = None) -> None:
+        """Update configuration to a new vector.
+
+        Calling this function runs forward kinematics and computes
+        collision-pair distances, if applicable.
+
+        Args:
+            q: New configuration vector.
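+
+        Example (a sketch; ``config`` is hypothetical and ``q`` must follow the
+        full-model Pinocchio joint ordering):
+
+        .. code-block:: python
+
+            q_full = np.zeros(len(config.all_joint_names_pinocchio_order))
+            config.update(q_full)  # updates both the reduced and the full model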
+ """ + if q is not None and len(q) != len(self._all_joint_names): + raise ValueError("q must have the same length as the number of joints in the model") + if q is not None: + super().update(q[self._controlled_joint_indices]) + + q_readonly = q.copy() + q_readonly.setflags(write=False) + self.full_q = q_readonly + pin.computeJointJacobians(self.full_model, self.full_data, q) + pin.updateFramePlacements(self.full_model, self.full_data) + else: + super().update() + pin.computeJointJacobians(self.full_model, self.full_data, self.full_q) + pin.updateFramePlacements(self.full_model, self.full_data) + + def get_frame_jacobian(self, frame: str) -> np.ndarray: + r"""Compute the Jacobian matrix of a frame velocity. + + Denoting our frame by :math:`B` and the world frame by :math:`W`, the + Jacobian matrix :math:`{}_B J_{WB}` is related to the body velocity + :math:`{}_B v_{WB}` by: + + .. math:: + + {}_B v_{WB} = {}_B J_{WB} \dot{q} + + Args: + frame: Name of the frame, typically a link name from the URDF. + + Returns: + Jacobian :math:`{}_B J_{WB}` of the frame. + + When the robot model includes a floating base + (pin.JointModelFreeFlyer), the configuration vector :math:`q` consists + of: + + - ``q[0:3]``: position in [m] of the floating base in the inertial + frame, formatted as :math:`[p_x, p_y, p_z]`. + - ``q[3:7]``: unit quaternion for the orientation of the floating base + in the inertial frame, formatted as :math:`[q_x, q_y, q_z, q_w]`. + - ``q[7:]``: joint angles in [rad]. + """ + if not self.full_model.existFrame(frame): + raise FrameNotFound(frame, self.full_model.frames) + frame_id = self.full_model.getFrameId(frame) + J: np.ndarray = pin.getFrameJacobian(self.full_model, self.full_data, frame_id, pin.ReferenceFrame.LOCAL) + return J[:, self._controlled_joint_indices] + + def get_transform_frame_to_world(self, frame: str) -> pin.SE3: + """Get the pose of a frame in the current configuration. + We override this method from the super class to solve the issue that in the default + Pink implementation, the frame placements do not take into account the non-controlled joints + being not at initial pose (which is a bad assumption when they are controlled by other controllers like a lower body controller). + + Args: + frame: Name of a frame, typically a link name from the URDF. + + Returns: + Current transform from the given frame to the world frame. + + Raises: + FrameNotFound: if the frame name is not found in the robot model. 
+ """ + frame_id = self.full_model.getFrameId(frame) + try: + return self.full_data.oMf[frame_id].copy() + except IndexError as index_error: + raise FrameNotFound(frame, self.full_model.frames) from index_error + + def check_limits(self, tol: float = 1e-6, safety_break: bool = True) -> None: + """Check if limits are violated only if safety_break is enabled""" + if safety_break: + super().check_limits(tol, safety_break) + + @property + def controlled_joint_names_pinocchio_order(self) -> list[str]: + """Get the names of the controlled joints in the order of the pinocchio model.""" + return [self._all_joint_names[i] for i in self._controlled_joint_indices] + + @property + def all_joint_names_pinocchio_order(self) -> list[str]: + """Get the names of all joints in the order of the pinocchio model.""" + return self._all_joint_names diff --git a/source/isaaclab/isaaclab/controllers/utils.py b/source/isaaclab/isaaclab/controllers/utils.py new file mode 100644 index 00000000000..7e72912fdfd --- /dev/null +++ b/source/isaaclab/isaaclab/controllers/utils.py @@ -0,0 +1,138 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Helper functions for Isaac Lab controllers. + +This module provides utility functions to help with controller implementations. +""" + +import logging +import os +import re + +from isaacsim.core.utils.extensions import enable_extension + +enable_extension("isaacsim.asset.exporter.urdf") + +from nvidia.srl.from_usd.to_urdf import UsdToUrdf + +# import logger +logger = logging.getLogger(__name__) + + +def convert_usd_to_urdf(usd_path: str, output_path: str, force_conversion: bool = True) -> tuple[str, str]: + """Convert a USD file to URDF format. + + Args: + usd_path: Path to the USD file to convert. + output_path: Directory to save the converted URDF and mesh files. + force_conversion: Whether to force the conversion even if the URDF and mesh files already exist. + Returns: + A tuple containing the paths to the URDF file and the mesh directory. 
+ """ + usd_to_urdf_kwargs = { + "node_names_to_remove": None, + "edge_names_to_remove": None, + "root": None, + "parent_link_is_body_1": None, + "log_level": logging.ERROR, + } + + urdf_output_dir = os.path.join(output_path, "urdf") + urdf_file_name = os.path.basename(usd_path).split(".")[0] + ".urdf" + urdf_output_path = urdf_output_dir + "/" + urdf_file_name + urdf_meshes_output_dir = os.path.join(output_path, "meshes") + + if not os.path.exists(urdf_output_path) or not os.path.exists(urdf_meshes_output_dir) or force_conversion: + usd_to_urdf = UsdToUrdf.init_from_file(usd_path, **usd_to_urdf_kwargs) + os.makedirs(urdf_output_dir, exist_ok=True) + os.makedirs(urdf_meshes_output_dir, exist_ok=True) + + output_path = usd_to_urdf.save_to_file( + urdf_output_path=urdf_output_path, + visualize_collision_meshes=False, + mesh_dir=urdf_meshes_output_dir, + mesh_path_prefix="", + ) + + # The current version of the usd to urdf converter creates "inf" effort, + # This has to be replaced with a max value for the urdf to be valid + # Open the file for reading and writing + with open(urdf_output_path) as file: + # Read the content of the file + content = file.read() + + # Replace all occurrences of 'inf' with '0' + content = content.replace("inf", "0.") + + # Open the file again to write the modified content + with open(urdf_output_path, "w") as file: + # Write the modified content back to the file + file.write(content) + return urdf_output_path, urdf_meshes_output_dir + + +def change_revolute_to_fixed(urdf_path: str, fixed_joints: list[str], verbose: bool = False): + """Change revolute joints to fixed joints in a URDF file. + + This function modifies a URDF file by changing specified revolute joints to fixed joints. + This is useful when you want to disable certain joints in a robot model. + + Args: + urdf_path: Path to the URDF file to modify. + fixed_joints: List of joint names to convert from revolute to fixed. + verbose: Whether to print information about the changes being made. + """ + with open(urdf_path) as file: + content = file.read() + + for joint in fixed_joints: + old_str = f'' + new_str = f'' + if verbose: + logger.warning(f"Replacing {joint} with fixed joint") + logger.warning(old_str) + logger.warning(new_str) + if old_str not in content: + logger.warning(f"Error: Could not find revolute joint named '{joint}' in URDF file") + content = content.replace(old_str, new_str) + + with open(urdf_path, "w") as file: + file.write(content) + + +def change_revolute_to_fixed_regex(urdf_path: str, fixed_joints: list[str], verbose: bool = False): + """Change revolute joints to fixed joints in a URDF file. + + This function modifies a URDF file by changing specified revolute joints to fixed joints. + This is useful when you want to disable certain joints in a robot model. + + Args: + urdf_path: Path to the URDF file to modify. + fixed_joints: List of regular expressions matching joint names to convert from revolute to fixed. + verbose: Whether to print information about the changes being made. 
+ """ + + with open(urdf_path) as file: + content = file.read() + + # Find all revolute joints in the URDF + revolute_joints = re.findall(r'', content) + + for joint in revolute_joints: + # Check if this joint matches any of the fixed joint patterns + should_fix = any(re.match(pattern, joint) for pattern in fixed_joints) + + if should_fix: + old_str = f'' + new_str = f'' + if verbose: + logger.warning(f"Replacing {joint} with fixed joint") + logger.warning(old_str) + logger.warning(new_str) + content = content.replace(old_str, new_str) + + with open(urdf_path, "w") as file: + file.write(content) diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py b/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py new file mode 100644 index 00000000000..e60bfefbcf1 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/mdp/actions/pink_actions_cfg.py @@ -0,0 +1,43 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.controllers.pink_ik import PinkIKControllerCfg +from isaaclab.managers.action_manager import ActionTerm, ActionTermCfg +from isaaclab.utils import configclass + +from . import pink_task_space_actions + + +@configclass +class PinkInverseKinematicsActionCfg(ActionTermCfg): + """Configuration for Pink inverse kinematics action term. + + This configuration is used to define settings for the Pink inverse kinematics action term, + which is a inverse kinematics framework. + """ + + class_type: type[ActionTerm] = pink_task_space_actions.PinkInverseKinematicsAction + """Specifies the action term class type for Pink inverse kinematics action.""" + + pink_controlled_joint_names: list[str] = MISSING + """List of joint names or regular expression patterns that specify the joints controlled by pink IK.""" + + hand_joint_names: list[str] = MISSING + """List of joint names or regular expression patterns that specify the joints controlled by hand retargeting.""" + + controller: PinkIKControllerCfg = MISSING + """Configuration for the Pink IK controller that will be used to solve the inverse kinematics.""" + + enable_gravity_compensation: bool = False + """Whether to compensate for gravity in the Pink IK controller.""" + + target_eef_link_names: dict[str, str] = MISSING + """Dictionary mapping task names to controlled link names for the Pink IK controller. + + This dictionary should map the task names (e.g., 'left_wrist', 'right_wrist') to the + corresponding link names in the URDF that will be controlled by the IK solver. + """ diff --git a/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py new file mode 100644 index 00000000000..cca41593af2 --- /dev/null +++ b/source/isaaclab/isaaclab/envs/mdp/actions/pink_task_space_actions.py @@ -0,0 +1,367 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from collections.abc import Sequence
+from typing import TYPE_CHECKING
+
+import warp as wp
+from pink.tasks import FrameTask
+
+import isaaclab.utils.math as math_utils
+from isaaclab.assets.articulation import Articulation
+from isaaclab.controllers.pink_ik import PinkIKController
+from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask
+from isaaclab.managers.action_manager import ActionTerm
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedEnv
+    from isaaclab.envs.utils.io_descriptors import GenericActionIODescriptor
+
+    from . import pink_actions_cfg
+
+
+class PinkInverseKinematicsAction(ActionTerm):
+    r"""Pink Inverse Kinematics action term.
+
+    This action term processes the action tensor and sets the corresponding task targets in the Pink IK framework.
+    The action tensor follows the order of the tasks defined in :class:`PinkIKControllerCfg`.
+    """
+
+    cfg: pink_actions_cfg.PinkInverseKinematicsActionCfg
+    """Configuration for the Pink Inverse Kinematics action term."""
+
+    _asset: Articulation
+    """The articulation asset to which the action term is applied."""
+
+    def __init__(self, cfg: pink_actions_cfg.PinkInverseKinematicsActionCfg, env: ManagerBasedEnv):
+        """Initialize the Pink Inverse Kinematics action term.
+
+        Args:
+            cfg: The configuration for this action term.
+            env: The environment in which the action term will be applied.
+        """
+        super().__init__(cfg, env)
+
+        self._env = env
+        self._sim_dt = env.sim.get_physics_dt()
+
+        # Initialize joint information
+        self._initialize_joint_info()
+
+        # Initialize IK controllers
+        self._initialize_ik_controllers()
+
+        # Initialize action tensors
+        self._raw_actions = torch.zeros(self.num_envs, self.action_dim, device=self.device)
+        self._processed_actions = torch.zeros_like(self._raw_actions)
+
+        # PhysX Articulation Floating joint indices offset from IsaacLab Articulation joint indices
+        self._physx_floating_joint_indices_offset = 6
+
+        # Pre-allocate tensors for runtime use
+        self._initialize_helper_tensors()
+
+    def _initialize_joint_info(self) -> None:
+        """Initialize joint IDs and names based on configuration."""
+        # Resolve pink controlled joints
+        _, self._isaaclab_controlled_joint_names, self._isaaclab_controlled_joint_ids = self._asset.find_joints(
+            self.cfg.pink_controlled_joint_names
+        )
+        self.cfg.controller.joint_names = self._isaaclab_controlled_joint_names
+        self._isaaclab_all_joint_ids = list(range(len(self._asset.joint_names)))
+        self.cfg.controller.all_joint_names = self._asset.joint_names
+
+        # Resolve hand joints
+        _, self._hand_joint_names, self._hand_joint_ids = self._asset.find_joints(self.cfg.hand_joint_names)
+
+        # Combine all joint information
+        self._controlled_joint_ids = self._isaaclab_controlled_joint_ids + self._hand_joint_ids
+        self._controlled_joint_names = self._isaaclab_controlled_joint_names + self._hand_joint_names
+
+    def _initialize_ik_controllers(self) -> None:
+        """Initialize Pink IK controllers for all environments."""
+        assert self._env.num_envs > 0, "Number of environments specified is less than 1."
+ + self._ik_controllers = [] + for _ in range(self._env.num_envs): + self._ik_controllers.append( + PinkIKController( + cfg=self.cfg.controller.copy(), + robot_cfg=self._env.scene.cfg.robot, + device=self.device, + controlled_joint_indices=self._isaaclab_controlled_joint_ids, + ) + ) + + def _initialize_helper_tensors(self) -> None: + """Pre-allocate tensors and cache values for performance optimization.""" + # Cache frequently used tensor versions of joint IDs to avoid repeated creation + self._controlled_joint_ids_tensor = torch.tensor(self._controlled_joint_ids, device=self.device) + + # Cache base link index to avoid string lookup every time + articulation = self._env.scene[self.cfg.controller.articulation_name] + self._base_link_idx = articulation.body_names.index(self.cfg.controller.base_link_name) + + # Pre-allocate working tensors + # Count only FrameTask instances in variable_input_tasks (not all tasks) + num_frame_tasks = sum( + 1 for task in self._ik_controllers[0].cfg.variable_input_tasks if isinstance(task, FrameTask) + ) + self._num_frame_tasks = num_frame_tasks + self._controlled_frame_poses = torch.zeros(num_frame_tasks, self.num_envs, 4, 4, device=self.device) + + # Pre-allocate tensor for base frame computations + self._base_link_frame_buffer = torch.zeros(self.num_envs, 4, 4, device=self.device) + + # ==================== Properties ==================== + + @property + def hand_joint_dim(self) -> int: + """Dimension for hand joint positions.""" + return self.cfg.controller.num_hand_joints + + @property + def position_dim(self) -> int: + """Dimension for position (x, y, z).""" + return 3 + + @property + def orientation_dim(self) -> int: + """Dimension for orientation (w, x, y, z).""" + return 4 + + @property + def pose_dim(self) -> int: + """Total pose dimension (position + orientation).""" + return self.position_dim + self.orientation_dim + + @property + def action_dim(self) -> int: + """Dimension of the action space (based on number of tasks and pose dimension).""" + # Count only FrameTask instances in variable_input_tasks + frame_tasks_count = sum( + 1 for task in self._ik_controllers[0].cfg.variable_input_tasks if isinstance(task, FrameTask) + ) + return frame_tasks_count * self.pose_dim + self.hand_joint_dim + + @property + def raw_actions(self) -> torch.Tensor: + """Get the raw actions tensor.""" + return self._raw_actions + + @property + def processed_actions(self) -> torch.Tensor: + """Get the processed actions tensor.""" + return self._processed_actions + + @property + def IO_descriptor(self) -> GenericActionIODescriptor: + """The IO descriptor of the action term. + + This descriptor is used to describe the action term of the pink inverse kinematics action. + It adds the following information to the base descriptor: + - scale: The scale of the action term. + - offset: The offset of the action term. + - clip: The clip of the action term. + - pink_controller_joint_names: The names of the pink controller joints. + - hand_joint_names: The names of the hand joints. + - controller_cfg: The configuration of the pink controller. + + Returns: + The IO descriptor of the action term. 
+ """ + super().IO_descriptor + self._IO_descriptor.shape = (self.action_dim,) + self._IO_descriptor.dtype = str(self.raw_actions.dtype) + self._IO_descriptor.action_type = "PinkInverseKinematicsAction" + self._IO_descriptor.pink_controller_joint_names = self._isaaclab_controlled_joint_names + self._IO_descriptor.hand_joint_names = self._hand_joint_names + self._IO_descriptor.extras["controller_cfg"] = self.cfg.controller.__dict__ + return self._IO_descriptor + + # """ + # Operations. + # """ + + def process_actions(self, actions: torch.Tensor) -> None: + """Process the input actions and set targets for each task. + + Args: + actions: The input actions tensor. + """ + # Store raw actions + self._raw_actions[:] = actions + + # Extract hand joint positions directly (no cloning needed) + self._target_hand_joint_positions = actions[:, -self.hand_joint_dim :] + + # Get base link frame transformation + self.base_link_frame_in_world_rf = self._get_base_link_frame_transform() + + # Process controlled frame poses (pass original actions, no clone needed) + controlled_frame_poses = self._extract_controlled_frame_poses(actions) + transformed_poses = self._transform_poses_to_base_link_frame(controlled_frame_poses) + + # Set targets for all tasks + self._set_task_targets(transformed_poses) + + def _get_base_link_frame_transform(self) -> torch.Tensor: + """Get the base link frame transformation matrix. + + Returns: + Base link frame transformation matrix. + """ + # Get base link frame pose in world origin using cached index + articulation_data = self._env.scene[self.cfg.controller.articulation_name].data + base_link_frame_in_world_origin = wp.to_torch(articulation_data.body_link_state_w)[:, self._base_link_idx, :7] + + # Transform to environment origin frame (reuse buffer to avoid allocation) + torch.sub( + base_link_frame_in_world_origin[:, :3], + self._env.scene.env_origins, + out=self._base_link_frame_buffer[:, :3, 3], + ) + + # Copy orientation (avoid clone) + base_link_frame_quat = base_link_frame_in_world_origin[:, 3:7] + + # Create transformation matrix + return math_utils.make_pose( + self._base_link_frame_buffer[:, :3, 3], math_utils.matrix_from_quat(base_link_frame_quat) + ) + + def _extract_controlled_frame_poses(self, actions: torch.Tensor) -> torch.Tensor: + """Extract controlled frame poses from action tensor. + + Args: + actions: The action tensor. + + Returns: + Stacked controlled frame poses tensor. + """ + # Use pre-allocated tensor instead of list operations + for task_index in range(self._num_frame_tasks): + # Extract position and orientation for this task + pos_start = task_index * self.pose_dim + pos_end = pos_start + self.position_dim + quat_start = pos_end + quat_end = (task_index + 1) * self.pose_dim + + position = actions[:, pos_start:pos_end] + quaternion = actions[:, quat_start:quat_end] + + # Create pose matrix directly into pre-allocated tensor + self._controlled_frame_poses[task_index] = math_utils.make_pose( + position, math_utils.matrix_from_quat(quaternion) + ) + + return self._controlled_frame_poses + + def _transform_poses_to_base_link_frame(self, poses: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + """Transform poses from world frame to base link frame. + + Args: + poses: Poses in world frame. + + Returns: + Tuple of (positions, rotation_matrices) in base link frame. 
+ """ + # Transform poses to base link frame + base_link_inv = math_utils.pose_inv(self.base_link_frame_in_world_rf) + transformed_poses = math_utils.pose_in_A_to_pose_in_B(poses, base_link_inv) + + # Extract position and rotation + positions, rotation_matrices = math_utils.unmake_pose(transformed_poses) + + return positions, rotation_matrices + + def _set_task_targets(self, transformed_poses: tuple[torch.Tensor, torch.Tensor]) -> None: + """Set targets for all tasks across all environments. + + Args: + transformed_poses: Tuple of (positions, rotation_matrices) in base link frame. + """ + positions, rotation_matrices = transformed_poses + + for env_index, ik_controller in enumerate(self._ik_controllers): + for frame_task_index, task in enumerate(ik_controller.cfg.variable_input_tasks): + if isinstance(task, LocalFrameTask): + target = task.transform_target_to_base + elif isinstance(task, FrameTask): + target = task.transform_target_to_world + else: + continue + + # Set position and rotation targets using frame_task_index + target.translation = positions[frame_task_index, env_index, :].cpu().numpy() + target.rotation = rotation_matrices[frame_task_index, env_index, :].cpu().numpy() + + task.set_target(target) + + # ==================== Action Application ==================== + + def apply_actions(self) -> None: + """Apply the computed joint positions based on the inverse kinematics solution.""" + # Compute IK solutions for all environments + ik_joint_positions = self._compute_ik_solutions() + + # Combine IK and hand joint positions + all_joint_positions = torch.cat((ik_joint_positions, self._target_hand_joint_positions), dim=1) + self._processed_actions = all_joint_positions + + # Apply gravity compensation to arm joints + if self.cfg.enable_gravity_compensation: + self._apply_gravity_compensation() + + # Apply joint position targets + self._asset.set_joint_position_target(self._processed_actions, self._controlled_joint_ids) + + def _apply_gravity_compensation(self) -> None: + """Apply gravity compensation to arm joints if not disabled in props.""" + if not self._asset.cfg.spawn.rigid_props.disable_gravity: + # Get gravity compensation forces using cached tensor + if self._asset.is_fixed_base: + gravity = torch.zeros_like( + self._asset.root_physx_view.get_gravity_compensation_forces()[:, self._controlled_joint_ids_tensor] + ) + else: + # If floating base, then need to skip the first 6 joints (base) + gravity = self._asset.root_physx_view.get_gravity_compensation_forces()[ + :, self._controlled_joint_ids_tensor + self._physx_floating_joint_indices_offset + ] + + # Apply gravity compensation to arm joints + self._asset.set_joint_effort_target(gravity, self._controlled_joint_ids) + + def _compute_ik_solutions(self) -> torch.Tensor: + """Compute IK solutions for all environments. + + Returns: + IK joint positions tensor for all environments. + """ + ik_solutions = [] + + for env_index, ik_controller in enumerate(self._ik_controllers): + # Get current joint positions for this environment + current_joint_pos = wp.to_torch(self._asset.data.joint_pos).cpu().numpy()[env_index] + + # Compute IK solution + joint_pos_des = ik_controller.compute(current_joint_pos, self._sim_dt) + ik_solutions.append(joint_pos_des) + + return torch.stack(ik_solutions) + + # ==================== Reset ==================== + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset the action term for specified environments. + + Args: + env_ids: A list of environment IDs to reset. 
If None, all environments are reset. + """ + self._raw_actions[env_ids] = torch.zeros(self.action_dim, device=self.device) diff --git a/source/isaaclab/isaaclab/envs/mdp/events.py b/source/isaaclab/isaaclab/envs/mdp/events.py index 8b46b549d19..db55c572ec8 100644 --- a/source/isaaclab/isaaclab/envs/mdp/events.py +++ b/source/isaaclab/isaaclab/envs/mdp/events.py @@ -14,6 +14,7 @@ from __future__ import annotations +import contextlib import logging import math import re @@ -1187,24 +1188,27 @@ def reset_scene_to_default(env: ManagerBasedEnv, env_ids: torch.Tensor, reset_jo targets to default values, especially when the targets should be handled by action terms and not event terms. """ # rigid bodies - for rigid_object in env.scene.rigid_objects.values(): - # obtain default and deal with the offset for env origins - default_root_state = rigid_object.data.default_root_state[env_ids].clone() - default_root_state[:, 0:3] += env.scene.env_origins[env_ids] - # set into the physics simulation - rigid_object.write_root_pose_to_sim(default_root_state[:, :7], env_ids=env_ids) - rigid_object.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids=env_ids) + # Note: rigid_objects is not supported in Newton backend + with contextlib.suppress(NotImplementedError): + for rigid_object in env.scene.rigid_objects.values(): + # obtain default and deal with the offset for env origins + default_root_state = rigid_object.data.default_root_state[env_ids].clone() + default_root_state[:, 0:3] += env.scene.env_origins[env_ids] + # set into the physics simulation + rigid_object.write_root_pose_to_sim(default_root_state[:, :7], env_ids=env_ids) + rigid_object.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids=env_ids) # articulations for articulation_asset in env.scene.articulations.values(): # obtain default and deal with the offset for env origins - default_root_state = articulation_asset.data.default_root_state[env_ids].clone() - default_root_state[:, 0:3] += env.scene.env_origins[env_ids] + default_root_pose = wp.to_torch(articulation_asset.data.default_root_pose)[env_ids].clone() + default_root_vel = wp.to_torch(articulation_asset.data.default_root_vel)[env_ids].clone() + default_root_pose[:, 0:3] += env.scene.env_origins[env_ids] # set into the physics simulation - articulation_asset.write_root_pose_to_sim(default_root_state[:, :7], env_ids=env_ids) - articulation_asset.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids=env_ids) + articulation_asset.write_root_pose_to_sim(default_root_pose, env_ids=env_ids) + articulation_asset.write_root_velocity_to_sim(default_root_vel, env_ids=env_ids) # obtain default joint positions - default_joint_pos = articulation_asset.data.default_joint_pos[env_ids].clone() - default_joint_vel = articulation_asset.data.default_joint_vel[env_ids].clone() + default_joint_pos = wp.to_torch(articulation_asset.data.default_joint_pos)[env_ids].clone() + default_joint_vel = wp.to_torch(articulation_asset.data.default_joint_vel)[env_ids].clone() # set into the physics simulation articulation_asset.write_joint_state_to_sim(default_joint_pos, default_joint_vel, env_ids=env_ids) # reset joint targets if required diff --git a/source/isaaclab/isaaclab/envs/mdp/observations.py b/source/isaaclab/isaaclab/envs/mdp/observations.py index 9bfdacc3c85..bdd846612f2 100644 --- a/source/isaaclab/isaaclab/envs/mdp/observations.py +++ b/source/isaaclab/isaaclab/envs/mdp/observations.py @@ -86,7 +86,7 @@ def root_pos_w(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg( 
"""Asset root position in the environment frame.""" # extract the used quantities (to enable type-hinting) asset: RigidObject = env.scene[asset_cfg.name] - return wp.to_torch(asset.data.root_pos_w) - wp.to_torch(env.scene.env_origins) + return wp.to_torch(asset.data.root_pos_w) - env.scene.env_origins @generic_io_descriptor( diff --git a/source/isaaclab/isaaclab/sim/schemas/schemas.py b/source/isaaclab/isaaclab/sim/schemas/schemas.py index 63870a06048..ab3e03a7b01 100644 --- a/source/isaaclab/isaaclab/sim/schemas/schemas.py +++ b/source/isaaclab/isaaclab/sim/schemas/schemas.py @@ -11,7 +11,7 @@ # import omni.physx.scripts.utils as physx_utils # from omni.physx.scripts import deformableUtils as deformable_utils -from pxr import Usd, UsdPhysics +from pxr import Gf, Usd, UsdPhysics from isaaclab.sim.utils.stage import get_current_stage from isaaclab.utils.string import to_camel_case @@ -178,8 +178,16 @@ def modify_articulation_root_properties( ) # create a fixed joint between the root link and the world frame - # physx_utils.createJoint(stage=stage, joint_type="Fixed", from_prim=None, to_prim=articulation_prim) - # TODO: fix this + # NOTE: We intentionally connect only one body so the joint is attached to the world. + fixed_joint_prim_path = f"{prim_path}/FixedJoint" + fixed_joint = UsdPhysics.FixedJoint.Define(stage, fixed_joint_prim_path) + fixed_joint.CreateBody1Rel().SetTargets([articulation_prim.GetPath()]) + fixed_joint.CreateJointEnabledAttr().Set(True) + # keep joint frames at the body origin to avoid snapping + fixed_joint.CreateLocalPos0Attr().Set((0.0, 0.0, 0.0)) + fixed_joint.CreateLocalRot0Attr().Set(Gf.Quatf(1.0, 0.0, 0.0, 0.0)) + fixed_joint.CreateLocalPos1Attr().Set((0.0, 0.0, 0.0)) + fixed_joint.CreateLocalRot1Attr().Set(Gf.Quatf(1.0, 0.0, 0.0, 0.0)) # Having a fixed joint on a rigid body is not treated as "fixed base articulation". # instead, it is treated as a part of the maximal coordinate tree. diff --git a/source/isaaclab/isaaclab/utils/io/torchscript.py b/source/isaaclab/isaaclab/utils/io/torchscript.py new file mode 100644 index 00000000000..f0607680d52 --- /dev/null +++ b/source/isaaclab/isaaclab/utils/io/torchscript.py @@ -0,0 +1,39 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""TorchScript I/O utilities.""" + +import os +import torch + + +def load_torchscript_model(model_path: str, device: str = "cpu") -> torch.nn.Module: + """Load a TorchScript model from the specified path. + + This function only loads TorchScript models (.pt or .pth files created with torch.jit.save). + It will not work with raw PyTorch checkpoints (.pth files created with torch.save). + + Args: + model_path (str): Path to the TorchScript model file (.pt or .pth) + device (str, optional): Device to load the model on. Defaults to 'cpu'. 
diff --git a/source/isaaclab/isaaclab/utils/string.py b/source/isaaclab/isaaclab/utils/string.py
index 7a68df93aab..abde2ab0ec9 100644
--- a/source/isaaclab/isaaclab/utils/string.py
+++ b/source/isaaclab/isaaclab/utils/string.py
@@ -272,7 +272,10 @@ def resolve_matching_names(
 def resolve_matching_names_values(
-    data: dict[str, Any], list_of_strings: Sequence[str], preserve_order: bool = False
+    data: dict[str, Any],
+    list_of_strings: Sequence[str],
+    preserve_order: bool = False,
+    match_all: bool = True,
 ) -> tuple[list[int], list[str], list[Any]]:
     """Match a list of regular expressions in a dictionary against a list of strings and return
     the matched indices, names, and values.
@@ -293,6 +296,7 @@
         data: A dictionary of regular expressions and values to match the strings in the list.
         list_of_strings: A list of strings to match.
         preserve_order: Whether to preserve the order of the query keys in the returned values. Defaults to False.
+        match_all: Whether to require that all keys in the dictionary get matched. Defaults to True.

     Returns:
         A tuple of lists containing the matched indices, names, and values.
@@ -300,7 +304,7 @@
     Raises:
         TypeError: When the input argument :attr:`data` is not a dictionary.
         ValueError: When multiple matches are found for a string in the dictionary.
-        ValueError: When not all regular expressions in the data keys are matched.
+        ValueError: When not all regular expressions in the data keys are matched (if match_all is True).
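To make the new match_all flag concrete, a hedged example (the joint names and gains below are invented):

from isaaclab.utils.string import resolve_matching_names_values

joint_names = ["shoulder_pan", "shoulder_lift", "elbow"]
stiffness = {"shoulder_.*": 40.0, "wrist_.*": 10.0}

# match_all=True (the default) raises ValueError because "wrist_.*" matches nothing:
#   resolve_matching_names_values(stiffness, joint_names)
# match_all=False silently drops the unmatched key instead:
indices, names, values = resolve_matching_names_values(stiffness, joint_names, match_all=False)
# indices == [0, 1], names == ["shoulder_pan", "shoulder_lift"], values == [40.0, 40.0]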
""" # check valid input if not isinstance(data, dict): @@ -353,8 +357,8 @@ def resolve_matching_names_values( index_list = index_list_reorder names_list = names_list_reorder values_list = values_list_reorder - # check that all regular expressions are matched - if not all(keys_match_found): + # check that all regular expressions are matched (only if match_all is True) + if match_all and not all(keys_match_found): # make this print nicely aligned for debugging msg = "\n" for key, value in zip(data.keys(), keys_match_found): diff --git a/source/isaaclab/test/controllers/simplified_test_robot.urdf b/source/isaaclab/test/controllers/simplified_test_robot.urdf new file mode 100644 index 00000000000..b66ce68324b --- /dev/null +++ b/source/isaaclab/test/controllers/simplified_test_robot.urdf @@ -0,0 +1,191 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/source/isaaclab/test/controllers/test_controller_utils.py b/source/isaaclab/test/controllers/test_controller_utils.py new file mode 100644 index 00000000000..c967c63bc1d --- /dev/null +++ b/source/isaaclab/test/controllers/test_controller_utils.py @@ -0,0 +1,662 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test cases for Isaac Lab controller utilities.""" + +"""Launch Isaac Sim Simulator first.""" + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import os + +# Import the function to test +import tempfile +import torch + +import pytest + +from isaaclab.controllers.utils import change_revolute_to_fixed, change_revolute_to_fixed_regex +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path +from isaaclab.utils.io.torchscript import load_torchscript_model + + +@pytest.fixture +def mock_urdf_content(): + """Create mock URDF content for testing.""" + return """ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" + + +@pytest.fixture +def test_urdf_file(mock_urdf_content): + """Create a temporary URDF file for testing.""" + # Create a temporary directory for test files + test_dir = tempfile.mkdtemp() + + # Create the test URDF file + test_urdf_path = os.path.join(test_dir, "test_robot.urdf") + with open(test_urdf_path, "w") as f: + f.write(mock_urdf_content) + + yield test_urdf_path + + # Clean up the temporary directory and all its contents + import shutil + + shutil.rmtree(test_dir) + + +# ============================================================================= +# Test cases for change_revolute_to_fixed function +# ============================================================================= + + +def test_single_joint_conversion(test_urdf_file, mock_urdf_content): + """Test converting a single revolute joint to fixed.""" + # Test converting shoulder_to_elbow joint + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the 
joint was converted + assert '' in modified_content + assert '' not in modified_content + + # Check that other revolute joints remain unchanged + assert '' in modified_content + assert '' in modified_content + + +def test_multiple_joints_conversion(test_urdf_file, mock_urdf_content): + """Test converting multiple revolute joints to fixed.""" + # Test converting multiple joints + fixed_joints = ["base_to_shoulder", "elbow_to_wrist"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that both joints were converted + assert '' in modified_content + assert '' in modified_content + assert '' not in modified_content + assert '' not in modified_content + + # Check that the middle joint remains unchanged + assert '' in modified_content + + +def test_non_existent_joint(test_urdf_file, mock_urdf_content): + """Test behavior when trying to convert a non-existent joint.""" + # Try to convert a joint that doesn't exist + fixed_joints = ["non_existent_joint"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_mixed_existent_and_non_existent_joints(test_urdf_file, mock_urdf_content): + """Test converting a mix of existent and non-existent joints.""" + # Try to convert both existent and non-existent joints + fixed_joints = ["base_to_shoulder", "non_existent_joint", "elbow_to_wrist"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that existent joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-existent joint didn't cause issues + assert '' not in modified_content + + +def test_already_fixed_joint(test_urdf_file, mock_urdf_content): + """Test behavior when trying to convert an already fixed joint.""" + # Try to convert a joint that is already fixed + fixed_joints = ["wrist_to_gripper"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged (no conversion happened) + assert modified_content == mock_urdf_content + + +def test_empty_joints_list(test_urdf_file, mock_urdf_content): + """Test behavior when passing an empty list of joints.""" + # Try to convert with empty list + fixed_joints = [] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_file_not_found(test_urdf_file): + """Test behavior when URDF file doesn't exist.""" + non_existent_path = os.path.join(os.path.dirname(test_urdf_file), "non_existent.urdf") + fixed_joints = ["base_to_shoulder"] + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + change_revolute_to_fixed(non_existent_path, fixed_joints) + + +def test_preserve_other_content(test_urdf_file): + """Test that other content in the URDF file is preserved.""" + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + 
modified_content = f.read() + + # Check that other content is preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_joint_attributes_preserved(test_urdf_file): + """Test that joint attributes other than type are preserved.""" + fixed_joints = ["base_to_shoulder"] + change_revolute_to_fixed(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted but other attributes preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + +# ============================================================================= +# Test cases for change_revolute_to_fixed_regex function +# ============================================================================= + + +def test_regex_single_joint_conversion(test_urdf_file, mock_urdf_content): + """Test converting a single revolute joint to fixed using regex pattern.""" + # Test converting shoulder_to_elbow joint using exact match + fixed_joints = ["shoulder_to_elbow"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted + assert '' in modified_content + assert '' not in modified_content + + # Check that other revolute joints remain unchanged + assert '' in modified_content + assert '' in modified_content + + +def test_regex_pattern_matching(test_urdf_file, mock_urdf_content): + """Test converting joints using regex patterns.""" + # Test converting joints that contain "to" in their name + fixed_joints = [r".*to.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that all joints with "to" in the name were converted + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_regex_multiple_patterns(test_urdf_file, mock_urdf_content): + """Test converting joints using multiple regex patterns.""" + # Test converting joints that start with "base" or end with "wrist" + fixed_joints = [r"^base.*", r".*wrist$"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that matching joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-matching joints remain unchanged + assert '' in modified_content + + +def test_regex_case_sensitive_matching(test_urdf_file, mock_urdf_content): + """Test that regex matching is case sensitive.""" + # Test with uppercase pattern that won't match lowercase joint names + fixed_joints = [r".*TO.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that no joints were converted (case sensitive) + assert modified_content == mock_urdf_content + + +def 
test_regex_partial_word_matching(test_urdf_file, mock_urdf_content): + """Test converting joints using partial word matching.""" + # Test converting joints that contain "shoulder" in their name + fixed_joints = [r".*shoulder.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that shoulder-related joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that other joints remain unchanged + assert '' in modified_content + + +def test_regex_no_matches(test_urdf_file, mock_urdf_content): + """Test behavior when regex patterns don't match any joints.""" + # Test with pattern that won't match any joint names + fixed_joints = [r"^nonexistent.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_regex_empty_patterns_list(test_urdf_file, mock_urdf_content): + """Test behavior when passing an empty list of regex patterns.""" + # Try to convert with empty list + fixed_joints = [] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged + assert modified_content == mock_urdf_content + + +def test_regex_file_not_found(test_urdf_file): + """Test behavior when URDF file doesn't exist for regex function.""" + non_existent_path = os.path.join(os.path.dirname(test_urdf_file), "non_existent.urdf") + fixed_joints = [r".*to.*"] + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + change_revolute_to_fixed_regex(non_existent_path, fixed_joints) + + +def test_regex_preserve_other_content(test_urdf_file): + """Test that other content in the URDF file is preserved with regex function.""" + fixed_joints = [r".*shoulder.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that other content is preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +def test_regex_joint_attributes_preserved(test_urdf_file): + """Test that joint attributes other than type are preserved with regex function.""" + fixed_joints = [r"^base.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the joint was converted but other attributes preserved + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + +def test_regex_complex_pattern(test_urdf_file, mock_urdf_content): + """Test converting joints using a complex regex pattern.""" + # Test converting joints that have "to" and end with a word starting with "w" + fixed_joints = [r".*to.*w.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = 
f.read() + + # Check that matching joints were converted + assert '' in modified_content + assert '' in modified_content + + # Check that non-matching joints remain unchanged + assert '' in modified_content + + +def test_regex_already_fixed_joint(test_urdf_file, mock_urdf_content): + """Test behavior when regex pattern matches an already fixed joint.""" + # Try to convert joints that contain "gripper" (which is already fixed) + fixed_joints = [r".*gripper.*"] + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that the file content remains unchanged (no conversion happened) + assert modified_content == mock_urdf_content + + +def test_regex_special_characters(test_urdf_file, mock_urdf_content): + """Test regex patterns with special characters.""" + # Test with pattern that includes special regex characters + fixed_joints = [r".*to.*"] # This should match joints with "to" + change_revolute_to_fixed_regex(test_urdf_file, fixed_joints) + + # Read the modified file + with open(test_urdf_file) as f: + modified_content = f.read() + + # Check that joints with "to" were converted + assert '' in modified_content + assert '' in modified_content + assert '' in modified_content + + # Check that the fixed joint remains unchanged + assert '' in modified_content + + +# ============================================================================= +# Test cases for load_torchscript_model function +# ============================================================================= + + +@pytest.fixture +def policy_model_path(): + """Path to the test TorchScript model.""" + _policy_path = f"{ISAACLAB_NUCLEUS_DIR}/Policies/Agile/agile_locomotion.pt" + return retrieve_file_path(_policy_path) + + +def test_load_torchscript_model_success(policy_model_path): + """Test successful loading of a TorchScript model.""" + model = load_torchscript_model(policy_model_path) + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + + +def test_load_torchscript_model_cpu_device(policy_model_path): + """Test loading TorchScript model on CPU device.""" + model = load_torchscript_model(policy_model_path, device="cpu") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + + +def test_load_torchscript_model_cuda_device(policy_model_path): + """Test loading TorchScript model on CUDA device if available.""" + if torch.cuda.is_available(): + model = load_torchscript_model(policy_model_path, device="cuda") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + # Check that model is in evaluation mode + assert model.training is False + else: + # Skip test if CUDA is not available + pytest.skip("CUDA not available") + + +def test_load_torchscript_model_file_not_found(): + """Test behavior when TorchScript model file doesn't exist.""" + non_existent_path = "non_existent_model.pt" + + # Should raise FileNotFoundError + with pytest.raises(FileNotFoundError): + load_torchscript_model(non_existent_path) + + +def test_load_torchscript_model_invalid_file(): + """Test behavior when trying to load an invalid TorchScript file.""" + # Create a temporary file with invalid content + import 
tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file.write(b"invalid torchscript content") + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) + + +def test_load_torchscript_model_empty_file(): + """Test behavior when trying to load an empty TorchScript file.""" + # Create a temporary empty file + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) + + +def test_load_torchscript_model_different_device_mapping(policy_model_path): + """Test loading model with different device mapping.""" + # Test with specific device mapping + model = load_torchscript_model(policy_model_path, device="cpu") + + # Check that model was loaded successfully + assert model is not None + assert isinstance(model, torch.nn.Module) + + +def test_load_torchscript_model_evaluation_mode(policy_model_path): + """Test that loaded model is in evaluation mode.""" + model = load_torchscript_model(policy_model_path) + + # Check that model is in evaluation mode + assert model.training is False + + # Verify we can set it to training mode and back + model.train() + assert model.training is True + model.eval() + assert model.training is False + + +def test_load_torchscript_model_inference_capability(policy_model_path): + """Test that loaded model can perform inference.""" + model = load_torchscript_model(policy_model_path) + + # Check that model was loaded successfully + assert model is not None + + # Try to create a dummy input tensor (actual input shape depends on the model) + # This is a basic test to ensure the model can handle tensor inputs + try: + # Create a dummy input tensor (adjust size based on expected input) + dummy_input = torch.randn(1, 75) # Adjust dimensions as needed + + # Try to run inference (this might fail if input shape is wrong, but shouldn't crash) + with torch.no_grad(): + try: + output = model(dummy_input) + # If successful, check that output is a tensor + assert isinstance(output, torch.Tensor) + except (RuntimeError, ValueError): + # Expected if input shape doesn't match model expectations + # This is acceptable for this test + pass + except Exception: + # If model doesn't accept this input format, that's okay for this test + # The main goal is to ensure the model loads without crashing + pass + + +def test_load_torchscript_model_error_handling(): + """Test error handling when loading fails.""" + # Create a temporary file that will cause a loading error + import tempfile + + with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as temp_file: + temp_file.write(b"definitely not a torchscript model") + temp_file_path = temp_file.name + + try: + # Should handle the error gracefully and return None + model = load_torchscript_model(temp_file_path) + assert model is None + finally: + # Clean up the temporary file + os.unlink(temp_file_path) diff --git a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.yaml b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.yaml new file mode 100644 index 00000000000..b2224713dd3 --- /dev/null +++ 
b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_g1_test_configs.yaml @@ -0,0 +1,149 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +tolerances: + position: 0.003 + pd_position: 0.003 + rotation: 0.017 + check_errors: true +# Pose format: position [x, y, z] and orientation [qw, qx, qy, qz]. +# Time values are in seconds +allowed_time_to_settle: 2 +tests: + pure_x_translation: + allowed_time_per_motion: 0.9 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.14, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.18, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.14, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.14, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.18, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.14, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + requires_waist_bending: false + repeat: 2 + + pure_y_translation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.15, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.2, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.25, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + right_hand_pose: + - position: [0.25, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.2, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.15, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + requires_waist_bending: false + repeat: 2 + + pure_z_translation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.85] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.9] + orientation: [0.7071, 0.0, 0.0, 0.7071] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.85] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.9] + orientation: [0.7071, 0.0, 0.0, 0.7071] + requires_waist_bending: false + repeat: 2 + + diagonal_translation: + allowed_time_per_motion: 1.0 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.23, 0.15, 0.85] + orientation: [0.7071, 0.0, 0.0, 0.7071] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.23, 0.15, 0.85] + orientation: [0.7071, 0.0, 0.0, 0.7071] + requires_waist_bending: false + repeat: 2 + + pure_roll_rotation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.8] + orientation: [0.9239, 0.0, 0.0, 0.3827] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.8] + orientation: [0.9239, 0.0, 0.0, 0.3827] + requires_waist_bending: false + repeat: 2 + + pure_pitch_rotation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.8] + orientation: [0.6964, 0.1228, -0.1228, 0.6964] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + 
orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.8] + orientation: [0.6964, 0.1228, -0.1228, 0.6964] + requires_waist_bending: false + repeat: 2 + + pure_yaw_rotation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.8] + orientation: [0.6964, 0.1228, 0.1228, 0.6964] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.8] + orientation: [0.6964, 0.1228, 0.1228, 0.6964] + requires_waist_bending: false + repeat: 2 + + compound_rotation: + allowed_time_per_motion: 0.8 + left_hand_pose: + - position: [-0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [-0.18, 0.1, 0.8] + orientation: [0.7848, 0.1179, 0.0292, 0.6072] + right_hand_pose: + - position: [0.18, 0.1, 0.8] + orientation: [0.7071, 0.0, 0.0, 0.7071] + - position: [0.18, 0.1, 0.8] + orientation: [0.7848, 0.1179, 0.0292, 0.6072] + requires_waist_bending: false + repeat: 2 diff --git a/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.yaml b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.yaml new file mode 100644 index 00000000000..8faa95fc61b --- /dev/null +++ b/source/isaaclab/test/controllers/test_ik_configs/pink_ik_gr1_test_configs.yaml @@ -0,0 +1,107 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +tolerances: + position: 0.001 + pd_position: 0.001 + rotation: 0.02 + check_errors: true +# Pose format: position [x, y, z] and orientation [qw, qx, qy, qz]. +# Poses are specified relative to the robot base frame (not world frame). 
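Both test-config files share this schema, so a harness can walk them generically. An illustrative consumption sketch (not the actual test driver; only the keys shown in the YAML above are assumed):

import yaml

with open("pink_ik_g1_test_configs.yaml") as f:
    cfg = yaml.safe_load(f)

pos_tol = cfg["tolerances"]["position"]      # meters
settle_time = cfg["allowed_time_to_settle"]  # seconds
for name, test in cfg["tests"].items():
    for _ in range(test["repeat"]):
        for left, right in zip(test["left_hand_pose"], test["right_hand_pose"]):
            # position is [x, y, z]; orientation is [qw, qx, qy, qz]
            assert len(left["position"]) == 3 and len(left["orientation"]) == 4
            # ...command the IK action term here, allowing up to
            # test["allowed_time_per_motion"] seconds per waypoint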
+# Time values are in seconds +allowed_time_to_settle: 0.25 +tests: + vertical_movement: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.23, 0.32, 1.2] + orientation: [0.5, 0.5, -0.5, 0.5] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.23, 0.32, 1.2] + orientation: [0.5, 0.5, -0.5, 0.5] + allowed_time_per_motion: 0.4 + repeat: 2 + requires_waist_bending: false + stay_still: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + allowed_time_per_motion: 0.4 + repeat: 4 + requires_waist_bending: false + horizontal_movement: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.13, 0.32, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.13, 0.32, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + allowed_time_per_motion: 0.4 + repeat: 2 + requires_waist_bending: false + horizontal_small_movement: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.22, 0.32, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.22, 0.32, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + allowed_time_per_motion: 0.4 + repeat: 2 + requires_waist_bending: false + forward_waist_bending_movement: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.23, 0.5, 1.05] + orientation: [0.5, 0.5, -0.5, 0.5] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.23, 0.5, 1.05] + orientation: [0.5, 0.5, -0.5, 0.5] + allowed_time_per_motion: 1.25 + repeat: 3 + requires_waist_bending: true + rotation_movements: + left_hand_pose: + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.23, 0.32, 1.1] + orientation: [0.7071, 0.7071, 0.0, 0.0] + - position: [-0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [-0.23, 0.32, 1.1] + orientation: [0.0, 0.0, -0.7071, 0.7071] + right_hand_pose: + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.23, 0.32, 1.1] + orientation: [0.0, 0.0, -0.7071, 0.7071] + - position: [0.23, 0.28, 1.1] + orientation: [0.5, 0.5, -0.5, 0.5] + - position: [0.23, 0.32, 1.1] + orientation: [0.7071, 0.7071, 0.0, 0.0] + allowed_time_per_motion: 0.5 + repeat: 2 + requires_waist_bending: false diff --git a/source/isaaclab/test/controllers/test_local_frame_task.py b/source/isaaclab/test/controllers/test_local_frame_task.py new file mode 100644 index 00000000000..25aa6cb8ec3 --- /dev/null +++ b/source/isaaclab/test/controllers/test_local_frame_task.py @@ -0,0 +1,482 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test cases for LocalFrameTask class.""" + +# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim +# pinocchio is required by the Pink IK controller +import sys + +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import numpy as np +from pathlib import Path + +import pinocchio as pin +import pytest + +from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask +from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration + +# class TestLocalFrameTask: +# """Test suite for LocalFrameTask class.""" + + +@pytest.fixture +def urdf_path(): + """Path to test URDF file.""" + return Path(__file__).parent / "urdfs" / "test_urdf_two_link_robot.urdf" + + +@pytest.fixture +def controlled_joint_names(): + """List of controlled joint names for testing.""" + return ["joint_1", "joint_2"] + + +@pytest.fixture +def pink_config(urdf_path, controlled_joint_names): + """Create a PinkKinematicsConfiguration instance for testing.""" + return PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + controlled_joint_names=controlled_joint_names, + # copy_data=True, + # forward_kinematics=True, + ) + + +@pytest.fixture +def local_frame_task(): + """Create a LocalFrameTask instance for testing.""" + return LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + lm_damping=0.0, + gain=1.0, + ) + + +def test_initialization(local_frame_task): + """Test proper initialization of LocalFrameTask.""" + # Check that the task is properly initialized + assert local_frame_task.frame == "link_2" + assert local_frame_task.base_link_frame_name == "base_link" + assert np.allclose(local_frame_task.cost[:3], [1.0, 1.0, 1.0]) + assert np.allclose(local_frame_task.cost[3:], [1.0, 1.0, 1.0]) + assert local_frame_task.lm_damping == 0.0 + assert local_frame_task.gain == 1.0 + + # Check that target is initially None + assert local_frame_task.transform_target_to_base is None + + +def test_initialization_with_sequence_costs(): + """Test initialization with sequence costs.""" + task = LocalFrameTask( + frame="link_1", + base_link_frame_name="base_link", + position_cost=[1.0, 1.0, 1.0], + orientation_cost=[1.0, 1.0, 1.0], + lm_damping=0.1, + gain=2.0, + ) + + assert task.frame == "link_1" + assert task.base_link_frame_name == "base_link" + assert np.allclose(task.cost[:3], [1.0, 1.0, 1.0]) + assert np.allclose(task.cost[3:], [1.0, 1.0, 1.0]) + assert task.lm_damping == 0.1 + assert task.gain == 2.0 + + +def test_inheritance_from_frame_task(local_frame_task): + """Test that LocalFrameTask properly inherits from FrameTask.""" + from pink.tasks.frame_task import FrameTask + + # Check inheritance + assert isinstance(local_frame_task, FrameTask) + + # Check that we can call parent class methods + assert hasattr(local_frame_task, "compute_error") + assert hasattr(local_frame_task, "compute_jacobian") + + +def test_set_target(local_frame_task): + """Test setting target with a transform.""" + # Create a test transform + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + target_transform.rotation = pin.exp3(np.array([0.1, 0.0, 0.0])) + + # Set the target + local_frame_task.set_target(target_transform) + + # Check that target was set 
correctly + assert local_frame_task.transform_target_to_base is not None + assert isinstance(local_frame_task.transform_target_to_base, pin.SE3) + + # Check that it's a copy (not the same object) + assert local_frame_task.transform_target_to_base is not target_transform + + # Check that values match + assert np.allclose(local_frame_task.transform_target_to_base.translation, target_transform.translation) + assert np.allclose(local_frame_task.transform_target_to_base.rotation, target_transform.rotation) + + +def test_set_target_from_configuration(local_frame_task, pink_config): + """Test setting target from a robot configuration.""" + # Set target from configuration + local_frame_task.set_target_from_configuration(pink_config) + + # Check that target was set + assert local_frame_task.transform_target_to_base is not None + assert isinstance(local_frame_task.transform_target_to_base, pin.SE3) + + +def test_set_target_from_configuration_wrong_type(local_frame_task): + """Test that set_target_from_configuration raises error with wrong type.""" + with pytest.raises(ValueError, match="configuration must be a PinkKinematicsConfiguration"): + local_frame_task.set_target_from_configuration("not_a_configuration") + + +def test_compute_error_with_target_set(local_frame_task, pink_config): + """Test computing error when target is set.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Check that error is computed correctly + assert isinstance(error, np.ndarray) + assert error.shape == (6,) # 6D error (3 position + 3 orientation) + + # Error should not be all zeros (unless target exactly matches current pose) + # This is a reasonable assumption for a random target + + +def test_compute_error_without_target(local_frame_task, pink_config): + """Test that compute_error raises error when no target is set.""" + with pytest.raises(ValueError, match="no target set for frame 'link_2'"): + local_frame_task.compute_error(pink_config) + + +def test_compute_error_wrong_configuration_type(local_frame_task): + """Test that compute_error raises error with wrong configuration type.""" + # Set a target first + target_transform = pin.SE3.Identity() + local_frame_task.set_target(target_transform) + + with pytest.raises(ValueError, match="configuration must be a PinkKinematicsConfiguration"): + local_frame_task.compute_error("not_a_configuration") + + +def test_compute_jacobian_with_target_set(local_frame_task, pink_config): + """Test computing Jacobian when target is set.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + + # Check that Jacobian is computed correctly + assert isinstance(jacobian, np.ndarray) + assert jacobian.shape == (6, 2) # 6 rows (error), 2 columns (controlled joints) + + # Jacobian should not be all zeros + assert not np.allclose(jacobian, 0.0) + + +def test_compute_jacobian_without_target(local_frame_task, pink_config): + """Test that compute_jacobian raises error when no target is set.""" + with pytest.raises(Exception, match="no target set for frame 'link_2'"): + local_frame_task.compute_jacobian(pink_config) + + +def test_error_consistency_across_configurations(local_frame_task, pink_config): + """Test that 
error computation is consistent across different configurations.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute error at initial configuration + error_1 = local_frame_task.compute_error(pink_config) + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.5 # Change first revolute joint + pink_config.update(new_q) + + # Compute error at new configuration + error_2 = local_frame_task.compute_error(pink_config) + + # Errors should be different (not all close) + assert not np.allclose(error_1, error_2) + + +def test_jacobian_consistency_across_configurations(local_frame_task, pink_config): + """Test that Jacobian computation is consistent across different configurations.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian at initial configuration + jacobian_1 = local_frame_task.compute_jacobian(pink_config) + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.3 # Change first revolute joint + pink_config.update(new_q) + + # Compute Jacobian at new configuration + jacobian_2 = local_frame_task.compute_jacobian(pink_config) + + # Jacobians should be different (not all close) + assert not np.allclose(jacobian_1, jacobian_2) + + +def test_error_zero_at_target_pose(local_frame_task, pink_config): + """Test that error is zero when current pose matches target pose.""" + # Get current transform of the frame + current_transform = pink_config.get_transform_frame_to_world("link_2") + + # Set target to current pose + local_frame_task.set_target(current_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Error should be very close to zero + assert np.allclose(error, 0.0, atol=1e-10) + + +def test_different_frames(pink_config): + """Test LocalFrameTask with different frame names.""" + # Test with link_1 frame + task_link1 = LocalFrameTask( + frame="link_1", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + # Set target and compute error + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.0, 0.0]) + task_link1.set_target(target_transform) + + error_link1 = task_link1.compute_error(pink_config) + assert error_link1.shape == (6,) + + # Test with base_link frame + task_base = LocalFrameTask( + frame="base_link", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + task_base.set_target(target_transform) + error_base = task_base.compute_error(pink_config) + assert error_base.shape == (6,) + + +def test_different_base_frames(pink_config): + """Test LocalFrameTask with different base frame names.""" + # Test with base_link as base frame + task_base_base = LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=1.0, + orientation_cost=1.0, + ) + + target_transform = pin.SE3.Identity() + task_base_base.set_target(target_transform) + error_base_base = task_base_base.compute_error(pink_config) + assert error_base_base.shape == (6,) + + # Test with link_1 as base frame + task_link1_base = LocalFrameTask( + frame="link_2", + base_link_frame_name="link_1", + position_cost=1.0, + orientation_cost=1.0, + ) + + task_link1_base.set_target(target_transform) + error_link1_base = task_link1_base.compute_error(pink_config) + assert 
error_link1_base.shape == (6,) + + +def test_sequence_cost_parameters(): + """Test LocalFrameTask with sequence cost parameters.""" + task = LocalFrameTask( + frame="link_2", + base_link_frame_name="base_link", + position_cost=[1.0, 2.0, 3.0], + orientation_cost=[0.5, 1.0, 1.5], + lm_damping=0.1, + gain=2.0, + ) + + assert np.allclose(task.cost[:3], [1.0, 2.0, 3.0]) # Position costs + assert np.allclose(task.cost[3:], [0.5, 1.0, 1.5]) # Orientation costs + assert task.lm_damping == 0.1 + assert task.gain == 2.0 + + +def test_error_magnitude_consistency(local_frame_task, pink_config): + """Test that error computation produces reasonable results.""" + # Set a small target offset + small_target = pin.SE3.Identity() + small_target.translation = np.array([0.01, 0.01, 0.01]) + local_frame_task.set_target(small_target) + + error_small = local_frame_task.compute_error(pink_config) + + # Set a large target offset + large_target = pin.SE3.Identity() + large_target.translation = np.array([0.5, 0.5, 0.5]) + local_frame_task.set_target(large_target) + + error_large = local_frame_task.compute_error(pink_config) + + # Both errors should be finite and reasonable + assert np.all(np.isfinite(error_small)) + assert np.all(np.isfinite(error_large)) + assert not np.allclose(error_small, error_large) # Different targets should produce different errors + + +def test_jacobian_structure(local_frame_task, pink_config): + """Test that Jacobian has the correct structure.""" + # Set a target + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.1, 0.2, 0.3]) + local_frame_task.set_target(target_transform) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + + # Check structure + assert jacobian.shape == (6, 2) # 6 error dimensions, 2 controlled joints + + # Check that Jacobian is not all zeros (basic functionality check) + assert not np.allclose(jacobian, 0.0) + + +def test_multiple_target_updates(local_frame_task, pink_config): + """Test that multiple target updates work correctly.""" + # Set first target + target1 = pin.SE3.Identity() + target1.translation = np.array([0.1, 0.0, 0.0]) + local_frame_task.set_target(target1) + + error1 = local_frame_task.compute_error(pink_config) + + # Set second target + target2 = pin.SE3.Identity() + target2.translation = np.array([0.0, 0.1, 0.0]) + local_frame_task.set_target(target2) + + error2 = local_frame_task.compute_error(pink_config) + + # Errors should be different + assert not np.allclose(error1, error2) + + +def test_inheritance_behavior(local_frame_task): + """Test that LocalFrameTask properly overrides parent class methods.""" + # Check that the class has the expected methods + assert hasattr(local_frame_task, "set_target") + assert hasattr(local_frame_task, "set_target_from_configuration") + assert hasattr(local_frame_task, "compute_error") + assert hasattr(local_frame_task, "compute_jacobian") + + # Check that these are the overridden methods, not the parent ones + assert local_frame_task.set_target.__qualname__ == "LocalFrameTask.set_target" + assert local_frame_task.compute_error.__qualname__ == "LocalFrameTask.compute_error" + assert local_frame_task.compute_jacobian.__qualname__ == "LocalFrameTask.compute_jacobian" + + +def test_target_copying_behavior(local_frame_task): + """Test that target transforms are properly copied.""" + # Create a target transform + original_target = pin.SE3.Identity() + original_target.translation = np.array([0.1, 0.2, 0.3]) + original_rotation = 
original_target.rotation.copy() + + # Set the target + local_frame_task.set_target(original_target) + + # Modify the original target + original_target.translation = np.array([0.5, 0.5, 0.5]) + original_target.rotation = pin.exp3(np.array([0.5, 0.0, 0.0])) + + # Check that the stored target is unchanged + assert np.allclose(local_frame_task.transform_target_to_base.translation, np.array([0.1, 0.2, 0.3])) + assert np.allclose(local_frame_task.transform_target_to_base.rotation, original_rotation) + + +def test_error_computation_with_orientation_difference(local_frame_task, pink_config): + """Test error computation when there's an orientation difference.""" + # Set a target with orientation difference + target_transform = pin.SE3.Identity() + target_transform.rotation = pin.exp3(np.array([0.2, 0.0, 0.0])) # Rotation around X-axis + local_frame_task.set_target(target_transform) + + # Compute error + error = local_frame_task.compute_error(pink_config) + + # Check that error is computed correctly + assert isinstance(error, np.ndarray) + assert error.shape == (6,) + + # Error should not be all zeros + assert not np.allclose(error, 0.0) + + +def test_jacobian_rank_consistency(local_frame_task, pink_config): + """Test that Jacobian maintains consistent shape across configurations.""" + # Set a target that we know can be reached by the test robot. + target_transform = pin.SE3.Identity() + target_transform.translation = np.array([0.0, 0.0, 0.45]) + # 90 degrees around x axis = pi/2 radians + target_transform.rotation = pin.exp3(np.array([np.pi / 2, 0.0, 0.0])) + local_frame_task.set_target(target_transform) + + # Compute Jacobian at multiple configurations + jacobians = [] + for i in range(5): + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.1 * i # Vary first joint + pink_config.update(new_q) + + # Compute Jacobian + jacobian = local_frame_task.compute_jacobian(pink_config) + jacobians.append(jacobian) + + # All Jacobians should have the same shape + for jacobian in jacobians: + assert jacobian.shape == (6, 2) + + # All Jacobians should have rank 2 (full rank for 2-DOF planar arm) + for jacobian in jacobians: + assert np.linalg.matrix_rank(jacobian) == 2 diff --git a/source/isaaclab/test/controllers/test_null_space_posture_task.py b/source/isaaclab/test/controllers/test_null_space_posture_task.py new file mode 100644 index 00000000000..f13f912a61f --- /dev/null +++ b/source/isaaclab/test/controllers/test_null_space_posture_task.py @@ -0,0 +1,340 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +"""Launch Isaac Sim Simulator first.""" + +# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim +# pinocchio is required by the Pink IK controller on every platform, so it is imported unconditionally +import pinocchio as pin # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +"""Unit tests for NullSpacePostureTask with simplified robot configuration using Pink library directly.""" + +import numpy as np + +import pytest +from pink.configuration import Configuration +from pink.tasks import FrameTask +from pinocchio.robot_wrapper import RobotWrapper + +from isaaclab.controllers.pink_ik.null_space_posture_task import NullSpacePostureTask + + +class TestNullSpacePostureTaskSimplifiedRobot: + """Test cases for NullSpacePostureTask with simplified robot configuration.""" + + @pytest.fixture + def num_joints(self): + """Number of joints in the simplified robot.""" + return 20 + + @pytest.fixture + def joint_configurations(self): + """Pre-generated joint configurations for testing.""" + # Set random seed for reproducible tests + np.random.seed(42) + + return { + "random": np.random.uniform(-0.5, 0.5, 20), + "controlled_only": np.array([0.5] * 5 + [0.0] * 15), # Non-zero for controlled joints only + "sequential": np.linspace(0.1, 2.0, 20), + } + + @pytest.fixture + def robot_urdf(self): + """Load the simplified test robot URDF file.""" + import os + + current_dir = os.path.dirname(os.path.abspath(__file__)) + urdf_path = os.path.join(current_dir, "simplified_test_robot.urdf") + return urdf_path + + @pytest.fixture + def robot_configuration(self, robot_urdf): + """Simplified robot wrapper.""" + wrapper = RobotWrapper.BuildFromURDF(robot_urdf, None, root_joint=None) + return Configuration(wrapper.model, wrapper.data, wrapper.q0) + + @pytest.fixture + def tasks(self): + """Pink tasks: a hand frame task plus the null-space posture task under test.""" + return [ + FrameTask("left_hand_pitch_link", position_cost=1.0, orientation_cost=1.0), + NullSpacePostureTask( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=[ + "waist_yaw_joint", + "waist_pitch_joint", + "waist_roll_joint", + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + ], + ), + ] + + def test_null_space_jacobian_zero_end_effector_velocity( + self, robot_configuration, tasks, joint_configurations, num_joints + ): + """Test that velocities projected through null space Jacobian result in zero end-effector velocity.""" + # Set specific joint configuration + robot_configuration.q = joint_configurations["random"] + + # Set frame task target to a specific position in workspace + frame_task = tasks[0] + # Create pin.SE3 from position and quaternion + position = np.array([0.5, 0.3, 0.8]) # x, y, z + quaternion = pin.Quaternion(1.0, 0.0, 0.0, 0.0) # w, x, y, z (identity quaternion) + target_pose = pin.SE3(quaternion, position) + frame_task.set_target(target_pose) + + # Set null space posture task target + null_space_task = tasks[1] + target_posture = np.zeros(num_joints) + null_space_task.set_target(target_posture) + + # Get the null space Jacobian + null_space_jacobian = null_space_task.compute_jacobian(robot_configuration) + + # Get the end-effector Jacobian + frame_task_jacobian = frame_task.compute_jacobian(robot_configuration) + + # Test multiple random velocities in
null space + for _ in range(10): + # Generate random joint velocity + random_velocity = np.random.randn(num_joints) * 0.1 + + # Project through null space Jacobian + null_space_velocity = null_space_jacobian @ random_velocity + + # Compute resulting end-effector velocity + ee_velocity = frame_task_jacobian @ null_space_velocity + + # The end-effector velocity should be approximately zero + assert np.allclose(ee_velocity, np.zeros(6), atol=1e-7), f"End-effector velocity not zero: {ee_velocity}" + + def test_null_space_jacobian_properties(self, robot_configuration, tasks, joint_configurations, num_joints): + """Test mathematical properties of the null space Jacobian.""" + # Set specific joint configuration + robot_configuration.q = joint_configurations["random"] + + # Set frame task target + frame_task = tasks[0] + # Create pin.SE3 from position and quaternion + position = np.array([0.3, 0.4, 0.6]) + quaternion = pin.Quaternion(0.707, 0.0, 0.0, 0.707) # w, x, y, z (90-degree rotation around Z) + target_pose = pin.SE3(quaternion, position) + frame_task.set_target(target_pose) + + # Set null space posture task target + null_space_task = tasks[1] + target_posture = np.zeros(num_joints) + target_posture[0:5] = [0.1, -0.1, 0.2, -0.2, 0.0] # Set first 5 joints (controlled joints) + null_space_task.set_target(target_posture) + + # Get Jacobians + null_space_jacobian = null_space_task.compute_jacobian(robot_configuration) + ee_jacobian = robot_configuration.get_frame_jacobian("left_hand_pitch_link") + + # Test: N * J^T should be approximately zero (null space property) + # where N is the null space projector and J is the end-effector Jacobian + null_space_projection = null_space_jacobian @ ee_jacobian.T + assert np.allclose( + null_space_projection, np.zeros_like(null_space_projection), atol=1e-7 + ), f"Null space projection of end-effector Jacobian not zero: {null_space_projection}" + + def test_null_space_jacobian_identity_when_no_frame_tasks( + self, robot_configuration, joint_configurations, num_joints + ): + """Test that null space Jacobian is identity when no frame tasks are defined.""" + # Create null space task without frame task controlled joints + null_space_task = NullSpacePostureTask(cost=1.0, controlled_frames=[], controlled_joints=[]) + + # Set specific joint configuration + robot_configuration.q = joint_configurations["sequential"] + + # Set target + target_posture = np.zeros(num_joints) + null_space_task.set_target(target_posture) + + # Get the null space Jacobian + null_space_jacobian = null_space_task.compute_jacobian(robot_configuration) + + # Should be identity matrix + expected_identity = np.eye(num_joints) + assert np.allclose( + null_space_jacobian, expected_identity + ), f"Null space Jacobian should be identity when no frame tasks defined: {null_space_jacobian}" + + def test_null_space_jacobian_consistency_across_configurations( + self, robot_configuration, tasks, joint_configurations, num_joints + ): + """Test that null space Jacobian is consistent across different joint configurations.""" + # Test multiple joint configurations + test_configs = [ + np.zeros(num_joints), # Zero configuration + joint_configurations["controlled_only"], # Non-zero for controlled joints + joint_configurations["random"], # Random configuration + ] + + # Set frame task target + frame_task = tasks[0] + # Create pin.SE3 from position and quaternion + position = np.array([0.3, 0.3, 0.5]) + quaternion = pin.Quaternion(1.0, 0.0, 0.0, 0.0) # w, x, y, z (identity quaternion) + target_pose =
pin.SE3(quaternion, position) + frame_task.set_target(target_pose) + + # Set null space posture task target + null_space_task = tasks[1] + target_posture = np.zeros(num_joints) + null_space_task.set_target(target_posture) + + jacobians = [] + for config in test_configs: + robot_configuration.q = config + jacobian = null_space_task.compute_jacobian(robot_configuration) + jacobians.append(jacobian) + + # Verify that velocities through this Jacobian result in zero end-effector velocity + ee_jacobian = robot_configuration.get_frame_jacobian("left_hand_pitch_link") + + # Test with random velocity + random_velocity = np.random.randn(num_joints) * 0.1 + null_space_velocity = jacobian @ random_velocity + ee_velocity = ee_jacobian @ null_space_velocity + + assert np.allclose( + ee_velocity, np.zeros(6), atol=1e-7 + ), f"End-effector velocity not zero for configuration {config}: {ee_velocity}" + + def test_compute_error_without_target(self, robot_configuration, joint_configurations): + """Test that compute_error raises ValueError when no target is set.""" + null_space_task = NullSpacePostureTask( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + ) + + robot_configuration.q = joint_configurations["sequential"] + + # Should raise ValueError when no target is set + with pytest.raises(ValueError, match="No posture target has been set"): + null_space_task.compute_error(robot_configuration) + + def test_joint_masking(self, robot_configuration, joint_configurations, num_joints): + """Test that joint mask correctly filters only controlled joints.""" + + controlled_joint_names = ["waist_pitch_joint", "left_shoulder_pitch_joint", "left_elbow_pitch_joint"] + + # Create task with specific controlled joints + null_space_task = NullSpacePostureTask( + cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=controlled_joint_names + ) + + # Find the joint indexes in robot_configuration.model.names.tolist()[1:] + joint_names = robot_configuration.model.names.tolist()[1:] + joint_indexes = [joint_names.index(jn) for jn in controlled_joint_names] + + # Set configurations + current_config = joint_configurations["sequential"] + target_config = np.zeros(num_joints) + + robot_configuration.q = current_config + null_space_task.set_target(target_config) + + # Compute error + error = null_space_task.compute_error(robot_configuration) + + # Only controlled joints should have non-zero error + # Joint indices: waist_yaw_joint=0, waist_pitch_joint=1, waist_roll_joint=2, left_shoulder_pitch_joint=3, left_shoulder_roll_joint=4, etc. 
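+ # For example, with a zero target and the "sequential" current configuration, expected_error equals + # current_config at the three controlled joint indices and is exactly zero everywhere else.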
+ expected_error = np.zeros(num_joints) + for i in joint_indexes: + expected_error[i] = current_config[i] + + assert np.allclose( + error, expected_error, atol=1e-7 + ), f"Joint mask not working correctly: expected {expected_error}, got {error}" + + def test_empty_controlled_joints(self, robot_configuration, joint_configurations, num_joints): + """Test behavior when controlled_joints is empty.""" + null_space_task = NullSpacePostureTask( + cost=1.0, controlled_frames=["left_hand_pitch_link"], controlled_joints=[] + ) + + current_config = joint_configurations["sequential"] + target_config = np.zeros(num_joints) + + robot_configuration.q = current_config + null_space_task.set_target(target_config) + + # Error should be all zeros + error = null_space_task.compute_error(robot_configuration) + expected_error = np.zeros(num_joints) + assert np.allclose(error, expected_error), f"Error should be zero when no joints controlled: {error}" + + def test_set_target_from_configuration(self, robot_configuration, joint_configurations): + """Test set_target_from_configuration method.""" + null_space_task = NullSpacePostureTask( + cost=1.0, + controlled_frames=["left_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint"], + ) + + # Set a specific configuration + test_config = joint_configurations["sequential"] + robot_configuration.q = test_config + + # Set target from configuration + null_space_task.set_target_from_configuration(robot_configuration) + + # Verify target was set correctly + assert null_space_task.target_q is not None + assert np.allclose(null_space_task.target_q, test_config) + + def test_multiple_frame_tasks(self, robot_configuration, joint_configurations, num_joints): + """Test null space projection with multiple frame tasks.""" + # Create task with multiple controlled frames + null_space_task = NullSpacePostureTask( + cost=1.0, + controlled_frames=["left_hand_pitch_link", "right_hand_pitch_link"], + controlled_joints=["waist_yaw_joint", "waist_pitch_joint", "waist_roll_joint"], + ) + + current_config = joint_configurations["sequential"] + robot_configuration.q = current_config + + # Get null space Jacobian + null_space_jacobian = null_space_task.compute_jacobian(robot_configuration) + + # Get Jacobians for both frames + jacobian_left_hand = robot_configuration.get_frame_jacobian("left_hand_pitch_link") + jacobian_right_hand = robot_configuration.get_frame_jacobian("right_hand_pitch_link") + + # Test that null space velocities result in zero velocity for both frames + for _ in range(5): + random_velocity = np.random.randn(num_joints) * 0.1 + null_space_velocity = null_space_jacobian @ random_velocity + + # Check both frames + ee_velocity_left = jacobian_left_hand @ null_space_velocity + ee_velocity_right = jacobian_right_hand @ null_space_velocity + + assert np.allclose( + ee_velocity_left, np.zeros(6), atol=1e-7 + ), f"Left hand velocity not zero: {ee_velocity_left}" + assert np.allclose( + ee_velocity_right, np.zeros(6), atol=1e-7 + ), f"Right hand velocity not zero: {ee_velocity_right}" diff --git a/source/isaaclab/test/controllers/test_pink_ik.py b/source/isaaclab/test/controllers/test_pink_ik.py new file mode 100644 index 00000000000..10705173918 --- /dev/null +++ b/source/isaaclab/test/controllers/test_pink_ik.py @@ -0,0 +1,494 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Launch Isaac Sim Simulator first.""" + +# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim +# pinocchio is required by the Pink IK controller +import argparse +import sys + +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Test Pink IK controller.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments (use parse_known_args to ignore pytest arguments) +args_cli, _ = parser.parse_known_args() +# Always run tests in headless mode by default +args_cli.headless = True +args_cli.visualizer = ["newton"] +args_cli.num_envs = 1 + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import contextlib +import gymnasium as gym +import numpy as np +import re +import torch +import yaml +from pathlib import Path + +import pytest +import warp as wp +from pink.configuration import Configuration +from pink.tasks import FrameTask + +import isaaclab.sim.utils.stage as stage_utils +from isaaclab.utils.math import axis_angle_from_quat, matrix_from_quat, quat_from_matrix, quat_inv + +import isaaclab_tasks # noqa: F401 +import isaaclab_tasks.manager_based.locomanipulation.pick_place # noqa: F401 + +# import isaaclab_tasks.manager_based.manipulation.pick_place # noqa: F401 +from isaaclab_tasks.utils.parse_cfg import parse_env_cfg + + +def load_test_config(env_name): + """Load test configuration based on environment type.""" + # Determine which config file to load based on environment name + if "G1" in env_name: + config_file = "pink_ik_g1_test_configs.yaml" + elif "GR1" in env_name: + config_file = "pink_ik_gr1_test_configs.yaml" + else: + raise ValueError(f"Unknown environment type in {env_name}. Expected G1 or GR1.") + + config_path = Path(__file__).parent / "test_ik_configs" / config_file + with open(config_path) as f: + return yaml.safe_load(f) + + +def normalize_hand_poses(hand_pose_entries): + """Normalize pose entries to a (N, 7) float array. + + Converts quaternion orientation from (w, x, y, z) format in config to (x, y, z, w) format. 
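+ For example, {"position": [px, py, pz], "orientation": [qw, qx, qy, qz]} becomes [px, py, pz, qx, qy, qz, qw].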
+ """ + if not hand_pose_entries: + return np.empty((0, 7), dtype=np.float32) + + first_entry = hand_pose_entries[0] + if isinstance(first_entry, dict): + pose_list = [] + for pose in hand_pose_entries: + position = pose["position"] + orientation = pose["orientation"] + # Convert quaternion from (w, x, y, z) to (x, y, z, w) format + orientation_xyzw = [orientation[1], orientation[2], orientation[3], orientation[0]] + pose_list.append(position + orientation_xyzw) + return np.array(pose_list, dtype=np.float32) + + return np.array(hand_pose_entries, dtype=np.float32) + + +def is_waist_enabled(env_cfg): + """Check if waist joints are enabled in the environment configuration.""" + if not hasattr(env_cfg.actions, "upper_body_ik"): + return False + + pink_controlled_joints = env_cfg.actions.upper_body_ik.pink_controlled_joint_names + + # Also check for pattern-based joint names (e.g., "waist_.*_joint") + return any(re.match("waist", joint) for joint in pink_controlled_joints) + + +def create_test_env(env_name, num_envs): + """Create a test environment with the Pink IK controller.""" + device = "cuda:0" + + # Create a new stage (works with both omni and newton backends) + stage_utils.create_new_stage() + env_cfg = parse_env_cfg(env_name, device=device, num_envs=num_envs) + # Modify scene config to not spawn the packing table to avoid collision with the robot + # del env_cfg.scene.packing_table + # del env_cfg.terminations.object_dropping + # del env_cfg.terminations.time_out + return gym.make(env_name, cfg=env_cfg).unwrapped, env_cfg + + # try: + # env_cfg = parse_env_cfg(env_name, device=device, num_envs=num_envs) + # # Modify scene config to not spawn the packing table to avoid collision with the robot + # # del env_cfg.scene.packing_table + # # del env_cfg.terminations.object_dropping + # # del env_cfg.terminations.time_out + # return gym.make(env_name, cfg=env_cfg).unwrapped, env_cfg + # except Exception as e: + # print(f"Failed to create environment: {str(e)}") + # raise + + +@pytest.fixture( + scope="module", + params=[ + # "Isaac-PickPlace-GR1T2-Abs-v0", + # "Isaac-PickPlace-GR1T2-WaistEnabled-Abs-v0", + "Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0", + "Isaac-PickPlace-Locomanipulation-G1-Abs-v0", + ], +) +def env_and_cfg(request): + """Create environment and configuration for tests.""" + env_name = request.param + + # Load the appropriate test configuration based on environment type + test_cfg = load_test_config(env_name) + + env, env_cfg = create_test_env(env_name, num_envs=1) + + # Get only the FrameTasks from variable_input_tasks + variable_input_tasks = [ + task for task in env_cfg.actions.upper_body_ik.controller.variable_input_tasks if isinstance(task, FrameTask) + ] + assert len(variable_input_tasks) == 2, "Expected exactly two FrameTasks (left and right hand)." 
+ frames = [task.frame for task in variable_input_tasks] + # Try to infer which is left and which is right + left_candidates = [f for f in frames if "left" in f.lower()] + right_candidates = [f for f in frames if "right" in f.lower()] + assert ( + len(left_candidates) == 1 and len(right_candidates) == 1 + ), f"Could not uniquely identify left/right frames from: {frames}" + left_eef_urdf_link_name = left_candidates[0] + right_eef_urdf_link_name = right_candidates[0] + + # Set up camera view + env.sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 1.0]) + + # Create test parameters from test_cfg + test_params = { + "position": test_cfg["tolerances"]["position"], + "rotation": test_cfg["tolerances"]["rotation"], + "pd_position": test_cfg["tolerances"]["pd_position"], + "check_errors": test_cfg["tolerances"]["check_errors"], + "left_eef_urdf_link_name": left_eef_urdf_link_name, + "right_eef_urdf_link_name": right_eef_urdf_link_name, + } + + try: + yield env, env_cfg, test_cfg, test_params + finally: + env.close() + + +@pytest.fixture +def test_setup(env_and_cfg): + """Set up test case - runs before each test.""" + env, env_cfg, test_cfg, test_params = env_and_cfg + + num_joints_in_robot_hands = env_cfg.actions.upper_body_ik.controller.num_hand_joints + + # Get Action Term and IK controller + action_term = env.action_manager.get_term(name="upper_body_ik") + pink_controllers = action_term._ik_controllers + articulation = action_term._asset + + # Initialize Pink Configuration for forward kinematics + test_kinematics_model = Configuration( + pink_controllers[0].pink_configuration.model, + pink_controllers[0].pink_configuration.data, + pink_controllers[0].pink_configuration.q, + ) + left_target_link_name = env_cfg.actions.upper_body_ik.target_eef_link_names["left_wrist"] + right_target_link_name = env_cfg.actions.upper_body_ik.target_eef_link_names["right_wrist"] + + return { + "env": env, + "env_cfg": env_cfg, + "test_cfg": test_cfg, + "test_params": test_params, + "num_joints_in_robot_hands": num_joints_in_robot_hands, + "action_term": action_term, + "pink_controllers": pink_controllers, + "articulation": articulation, + "test_kinematics_model": test_kinematics_model, + "left_target_link_name": left_target_link_name, + "right_target_link_name": right_target_link_name, + "left_eef_urdf_link_name": test_params["left_eef_urdf_link_name"], + "right_eef_urdf_link_name": test_params["right_eef_urdf_link_name"], + } + + +@pytest.mark.parametrize( + "test_name", + [ + "pure_x_translation", + "pure_y_translation", + "pure_z_translation", + "diagonal_translation", + "pure_roll_rotation", + "pure_pitch_rotation", + "pure_yaw_rotation", + "compound_rotation", + ], +) +def test_movement_types(test_setup, test_name): + """Test different movement types using parametrization.""" + test_cfg = test_setup["test_cfg"] + env_cfg = test_setup["env_cfg"] + + if test_name not in test_cfg["tests"]: + print(f"Skipping {test_name} test for {env_cfg.__class__.__name__} environment (test not defined)...") + pytest.skip(f"Test {test_name} not defined for {env_cfg.__class__.__name__}") + return + + test_config = test_cfg["tests"][test_name] + + # Check if test requires waist bending and if waist is enabled + requires_waist_bending = test_config.get("requires_waist_bending", False) + waist_enabled = is_waist_enabled(env_cfg) + + if requires_waist_bending and not waist_enabled: + print( + f"Skipping {test_name} test because it requires waist bending but waist is not enabled in" + f" {env_cfg.__class__.__name__}..." 
+ ) + pytest.skip(f"Test {test_name} requires waist bending but waist is not enabled") + return + + print(f"Running {test_name} test...") + run_movement_test(test_setup, test_config, test_cfg) + + +def run_movement_test(test_setup, test_config, test_cfg, aux_function=None): + """Run a movement test with the given configuration.""" + env = test_setup["env"] + num_joints_in_robot_hands = test_setup["num_joints_in_robot_hands"] + + # Load base-relative poses from config + left_hand_poses = normalize_hand_poses(test_config["left_hand_pose"]) + right_hand_poses = normalize_hand_poses(test_config["right_hand_pose"]) + + curr_pose_idx = 0 + test_counter = 0 + num_runs = 0 + + with contextlib.suppress(KeyboardInterrupt) and torch.inference_mode(): + obs, _ = env.reset() + + # Convert time (seconds) to steps using environment's step_dt + step_dt = env.step_dt + initial_steps = int(test_cfg["allowed_time_to_settle"] / step_dt) + steps_per_motion = int(test_config["allowed_time_per_motion"] / step_dt) + + phase = "initial" + steps_in_phase = 0 + + # while simulation_app.is_running() and not simulation_app.is_exiting(): + while True: + num_runs += 1 + steps_in_phase += 1 + + # Call auxiliary function if provided + if aux_function is not None: + aux_function(num_runs) + + # Create actions from hand poses and joint positions + setpoint_poses = np.concatenate([left_hand_poses[curr_pose_idx], right_hand_poses[curr_pose_idx]]) + actions = np.concatenate([setpoint_poses, np.zeros(num_joints_in_robot_hands)]) + actions = torch.tensor(actions, device=env.device, dtype=torch.float32) + # Append base command for Locomanipulation environments with fixed height + if test_setup["env_cfg"].__class__.__name__ == "LocomanipulationG1EnvCfg": + # Use a named variable for base height for clarity and maintainability + BASE_HEIGHT = 0.72 + base_command = torch.zeros(4, device=env.device, dtype=actions.dtype) + base_command[3] = BASE_HEIGHT + actions = torch.cat([actions, base_command]) + actions = actions.repeat(env.num_envs, 1) + + # Step environment + obs, _, _, _, _ = env.step(actions) + + # Determine the step interval for error checking + if phase == "initial": + check_interval = initial_steps + else: + check_interval = steps_per_motion + + # Check convergence and verify errors + if steps_in_phase % check_interval == 0: + print("Computing errors...") + errors = compute_errors( + test_setup, + env, + left_hand_poses[curr_pose_idx], + right_hand_poses[curr_pose_idx], + test_setup["left_eef_urdf_link_name"], + test_setup["right_eef_urdf_link_name"], + ) + print_debug_info(errors, test_counter) + test_params = test_setup["test_params"] + if test_params["check_errors"]: + verify_errors(errors, test_setup, test_params) + num_runs += 1 + + curr_pose_idx = (curr_pose_idx + 1) % len(left_hand_poses) + if curr_pose_idx == 0: + test_counter += 1 + if test_counter > test_config["repeat"]: + print("Test completed successfully") + break + # After the first phase, switch to normal interval + if phase == "initial": + phase = "normal" + steps_in_phase = 0 + + +def get_link_pose(env, link_name): + """Get the position and orientation of a link.""" + link_index = env.scene["robot"].body_names.index(link_name) + link_states = wp.to_torch(env.scene._articulations["robot"]._data.body_link_state_w) + link_pose = link_states[:, link_index, :7] + return link_pose[:, :3], link_pose[:, 3:7] + + +def calculate_rotation_error(current_rot, target_rot): + """Calculate the rotation error between current and target orientations in axis-angle 
format.""" + if isinstance(target_rot, torch.Tensor): + target_rot_tensor = ( + target_rot.unsqueeze(0).expand(current_rot.shape[0], -1) if target_rot.dim() == 1 else target_rot + ) + else: + target_rot_tensor = torch.tensor(target_rot, device=current_rot.device) + if target_rot_tensor.dim() == 1: + target_rot_tensor = target_rot_tensor.unsqueeze(0).expand(current_rot.shape[0], -1) + + return axis_angle_from_quat( + quat_from_matrix(matrix_from_quat(target_rot_tensor) * matrix_from_quat(quat_inv(current_rot))) + ) + + +def compute_errors( + test_setup, env, left_target_pose, right_target_pose, left_eef_urdf_link_name, right_eef_urdf_link_name +): + """Compute all error metrics for the current state.""" + action_term = test_setup["action_term"] + pink_controllers = test_setup["pink_controllers"] + articulation = test_setup["articulation"] + test_kinematics_model = test_setup["test_kinematics_model"] + left_target_link_name = test_setup["left_target_link_name"] + right_target_link_name = test_setup["right_target_link_name"] + + # Get current hand positions and orientations + left_hand_pos, left_hand_rot = get_link_pose(env, left_target_link_name) + right_hand_pos, right_hand_rot = get_link_pose(env, right_target_link_name) + + # Create setpoint tensors + device = env.device + num_envs = env.num_envs + left_hand_pose_setpoint = torch.tensor(left_target_pose, device=device).unsqueeze(0).repeat(num_envs, 1) + right_hand_pose_setpoint = torch.tensor(right_target_pose, device=device).unsqueeze(0).repeat(num_envs, 1) + + # Calculate position and rotation errors + left_pos_error = left_hand_pose_setpoint[:, :3] - left_hand_pos + right_pos_error = right_hand_pose_setpoint[:, :3] - right_hand_pos + left_rot_error = calculate_rotation_error(left_hand_rot, left_hand_pose_setpoint[:, 3:]) + right_rot_error = calculate_rotation_error(right_hand_rot, right_hand_pose_setpoint[:, 3:]) + + # Calculate PD controller errors + ik_controller = pink_controllers[0] + isaaclab_controlled_joint_ids = action_term._isaaclab_controlled_joint_ids + + # Get current and target positions for controlled joints only + curr_joints = wp.to_torch(articulation.data.joint_pos)[:, isaaclab_controlled_joint_ids].cpu().numpy()[0] + target_joints = action_term.processed_actions[:, : len(isaaclab_controlled_joint_ids)].cpu().numpy()[0] + + # Reorder joints for Pink IK (using controlled joint ordering) + curr_joints = np.array(curr_joints)[ik_controller.isaac_lab_to_pink_controlled_ordering] + target_joints = np.array(target_joints)[ik_controller.isaac_lab_to_pink_controlled_ordering] + + # Run forward kinematics + test_kinematics_model.update(curr_joints) + left_curr_pos = test_kinematics_model.get_transform_frame_to_world(frame=left_eef_urdf_link_name).translation + right_curr_pos = test_kinematics_model.get_transform_frame_to_world(frame=right_eef_urdf_link_name).translation + + test_kinematics_model.update(target_joints) + left_target_pos = test_kinematics_model.get_transform_frame_to_world(frame=left_eef_urdf_link_name).translation + right_target_pos = test_kinematics_model.get_transform_frame_to_world(frame=right_eef_urdf_link_name).translation + + # Calculate PD errors + left_pd_error = ( + torch.tensor(left_target_pos - left_curr_pos, device=device, dtype=torch.float32) + .unsqueeze(0) + .repeat(num_envs, 1) + ) + right_pd_error = ( + torch.tensor(right_target_pos - right_curr_pos, device=device, dtype=torch.float32) + .unsqueeze(0) + .repeat(num_envs, 1) + ) + + return { + "left_pos_error": left_pos_error, + "right_pos_error": 
right_pos_error, + "left_rot_error": left_rot_error, + "right_rot_error": right_rot_error, + "left_pd_error": left_pd_error, + "right_pd_error": right_pd_error, + } + + +def verify_errors(errors, test_setup, tolerances): + """Verify that all error metrics are within tolerance.""" + env = test_setup["env"] + device = env.device + num_envs = env.num_envs + zero_tensor = torch.zeros(num_envs, device=device) + + for hand in ["left", "right"]: + # Check PD controller errors + pd_error_norm = torch.norm(errors[f"{hand}_pd_error"], dim=1) + torch.testing.assert_close( + pd_error_norm, + zero_tensor, + rtol=0.0, + atol=tolerances["pd_position"], + msg=( + f"{hand.capitalize()} hand PD controller error ({pd_error_norm.item():.6f}) exceeds tolerance" + f" ({tolerances['pd_position']:.6f})" + ), + ) + + # Check IK position errors + pos_error_norm = torch.norm(errors[f"{hand}_pos_error"], dim=1) + torch.testing.assert_close( + pos_error_norm, + zero_tensor, + rtol=0.0, + atol=tolerances["position"], + msg=( + f"{hand.capitalize()} hand IK position error ({pos_error_norm.item():.6f}) exceeds tolerance" + f" ({tolerances['position']:.6f})" + ), + ) + + # Check rotation errors + rot_error_max = torch.max(errors[f"{hand}_rot_error"]) + torch.testing.assert_close( + rot_error_max, + torch.zeros_like(rot_error_max), + rtol=0.0, + atol=tolerances["rotation"], + msg=( + f"{hand.capitalize()} hand IK rotation error ({rot_error_max.item():.6f}) exceeds tolerance" + f" ({tolerances['rotation']:.6f})" + ), + ) + + +def print_debug_info(errors, test_counter): + """Print debug information about the current state.""" + print(f"\nTest iteration {test_counter + 1}:") + for hand in ["left", "right"]: + print(f"Measured {hand} hand position error:", errors[f"{hand}_pos_error"]) + print(f"Measured {hand} hand rotation error:", errors[f"{hand}_rot_error"]) + print(f"Measured {hand} hand PD error:", errors[f"{hand}_pd_error"]) diff --git a/source/isaaclab/test/controllers/test_pink_ik_components.py b/source/isaaclab/test/controllers/test_pink_ik_components.py new file mode 100644 index 00000000000..91d6a40f048 --- /dev/null +++ b/source/isaaclab/test/controllers/test_pink_ik_components.py @@ -0,0 +1,308 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Test cases for PinkKinematicsConfiguration class.""" + +# Import pinocchio in the main script to force the use of the dependencies installed by IsaacLab and not the one installed by Isaac Sim +# pinocchio is required by the Pink IK controller +import sys + +if sys.platform != "win32": + import pinocchio # noqa: F401 + +from isaaclab.app import AppLauncher + +# launch omniverse app +simulation_app = AppLauncher(headless=True).app + +import numpy as np +from pathlib import Path + +import pinocchio as pin +import pytest +from pink.exceptions import FrameNotFound + +from isaaclab.controllers.pink_ik.pink_kinematics_configuration import PinkKinematicsConfiguration + + +class TestPinkKinematicsConfiguration: + """Test suite for PinkKinematicsConfiguration class.""" + + @pytest.fixture + def urdf_path(self): + """Path to test URDF file.""" + return Path(__file__).parent / "urdfs/test_urdf_two_link_robot.urdf" + + @pytest.fixture + def mesh_path(self): + """Path to mesh directory (empty for simple test).""" + return "" + + @pytest.fixture + def controlled_joint_names(self): + """List of controlled joint names for testing.""" + return ["joint_1", "joint_2"] + + @pytest.fixture + def pink_config(self, urdf_path, mesh_path, controlled_joint_names): + """Create a PinkKinematicsConfiguration instance for testing.""" + return PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + copy_data=True, + forward_kinematics=True, + ) + + def test_initialization(self, pink_config, controlled_joint_names): + """Test proper initialization of PinkKinematicsConfiguration.""" + # Check that controlled joint names are stored correctly + assert pink_config._controlled_joint_names == controlled_joint_names + + # Check that both full and controlled models are created + assert pink_config.full_model is not None + assert pink_config.controlled_model is not None + assert pink_config.full_data is not None + assert pink_config.controlled_data is not None + + # Check that configuration vectors are initialized + assert pink_config.full_q is not None + assert pink_config.controlled_q is not None + + # Check that the controlled model has the same number or fewer joints than the full model + assert pink_config.controlled_model.nq <= pink_config.full_model.nq + + def test_joint_names_properties(self, pink_config): + """Test joint name properties.""" + # Test controlled joint names in pinocchio order + controlled_names = pink_config.controlled_joint_names_pinocchio_order + assert isinstance(controlled_names, list) + assert len(controlled_names) == len(pink_config._controlled_joint_names) + assert "joint_1" in controlled_names + assert "joint_2" in controlled_names + + # Test all joint names in pinocchio order + all_names = pink_config.all_joint_names_pinocchio_order + assert isinstance(all_names, list) + assert len(all_names) == len(controlled_names) + assert "joint_1" in all_names + assert "joint_2" in all_names + + def test_update_with_valid_configuration(self, pink_config): + """Test updating configuration with valid joint values.""" + # Get initial configuration + initial_q = pink_config.full_q.copy() + + # Create a new configuration with different joint values + new_q = initial_q.copy() + new_q[1] = 0.5 # Change first revolute joint value (index 1, since 0 is fixed joint) + + # Update configuration + pink_config.update(new_q) + + # Check that the configuration was updated +
assert not np.allclose(pink_config.full_q, initial_q) + assert np.allclose(pink_config.full_q, new_q) + + def test_update_with_none(self, pink_config): + """Test updating configuration with None (should use current configuration).""" + # Get initial configuration + initial_q = pink_config.full_q.copy() + + # Update with None + pink_config.update(None) + + # Configuration should remain the same + assert np.allclose(pink_config.full_q, initial_q) + + def test_update_with_wrong_dimensions(self, pink_config): + """Test that update raises ValueError with wrong configuration dimensions.""" + # Create configuration with wrong number of joints + wrong_q = np.array([0.1, 0.2, 0.3]) # Wrong number of joints + + with pytest.raises(ValueError, match="q must have the same length as the number of joints"): + pink_config.update(wrong_q) + + def test_get_frame_jacobian_existing_frame(self, pink_config): + """Test getting Jacobian for an existing frame.""" + # Get Jacobian for link_1 frame + jacobian = pink_config.get_frame_jacobian("link_1") + + # Check that Jacobian has correct shape + # Should be 6 rows (linear + angular velocity) and columns equal to controlled joints + expected_rows = 6 + expected_cols = len(pink_config._controlled_joint_names) + assert jacobian.shape == (expected_rows, expected_cols) + + # Check that Jacobian is not all zeros (should have some non-zero values) + assert not np.allclose(jacobian, 0.0) + + def test_get_frame_jacobian_nonexistent_frame(self, pink_config): + """Test that get_frame_jacobian raises FrameNotFound for non-existent frame.""" + with pytest.raises(FrameNotFound): + pink_config.get_frame_jacobian("nonexistent_frame") + + def test_get_transform_frame_to_world_existing_frame(self, pink_config): + """Test getting transform for an existing frame.""" + # Get transform for link_1 frame + transform = pink_config.get_transform_frame_to_world("link_1") + + # Check that transform is a pinocchio SE3 object + assert isinstance(transform, pin.SE3) + + # Check that transform has reasonable values (not identity for non-zero joint angles) + assert not np.allclose(transform.homogeneous, np.eye(4)) + + def test_get_transform_frame_to_world_nonexistent_frame(self, pink_config): + """Test that get_transform_frame_to_world raises FrameNotFound for non-existent frame.""" + with pytest.raises(FrameNotFound): + pink_config.get_transform_frame_to_world("nonexistent_frame") + + def test_multiple_controlled_joints(self, urdf_path, mesh_path): + """Test configuration with multiple controlled joints.""" + # Create configuration with all available joints as controlled + controlled_joint_names = ["joint_1", "joint_2"] # Both revolute joints + + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + # Check that controlled model has correct number of joints + assert pink_config.controlled_model.nq == len(controlled_joint_names) + + def test_no_controlled_joints(self, urdf_path, mesh_path): + """Test configuration with no controlled joints.""" + controlled_joint_names = [] + + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + # Check that controlled model has 0 joints + assert pink_config.controlled_model.nq == 0 + assert len(pink_config.controlled_q) == 0 + + def test_jacobian_consistency(self, pink_config): + """Test that Jacobian computation is consistent across updates.""" + # Get Jacobian at 
initial configuration + jacobian_1 = pink_config.get_frame_jacobian("link_2") + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.3 # Change first revolute joint (index 1, since 0 is fixed joint) + pink_config.update(new_q) + + # Get Jacobian at new configuration + jacobian_2 = pink_config.get_frame_jacobian("link_2") + + # Jacobians should be different (not all close) + assert not np.allclose(jacobian_1, jacobian_2) + + def test_transform_consistency(self, pink_config): + """Test that transform computation is consistent across updates.""" + # Get transform at initial configuration + transform_1 = pink_config.get_transform_frame_to_world("link_2") + + # Update configuration + new_q = pink_config.full_q.copy() + new_q[1] = 0.5 # Change first revolute joint (index 1, since 0 is fixed joint) + pink_config.update(new_q) + + # Get transform at new configuration + transform_2 = pink_config.get_transform_frame_to_world("link_2") + + # Transforms should be different + assert not np.allclose(transform_1.homogeneous, transform_2.homogeneous) + + def test_inheritance_from_configuration(self, pink_config): + """Test that PinkKinematicsConfiguration properly inherits from Pink Configuration.""" + from pink.configuration import Configuration + + # Check inheritance + assert isinstance(pink_config, Configuration) + + # Check that we can call parent class methods + assert hasattr(pink_config, "update") + assert hasattr(pink_config, "get_transform_frame_to_world") + + def test_controlled_joint_indices_calculation(self, pink_config): + """Test that controlled joint indices are calculated correctly.""" + # Check that controlled joint indices are valid + assert len(pink_config._controlled_joint_indices) == len(pink_config._controlled_joint_names) + + # Check that all indices are within bounds + for idx in pink_config._controlled_joint_indices: + assert 0 <= idx < len(pink_config._all_joint_names) + + # Check that indices correspond to controlled joint names + for i, idx in enumerate(pink_config._controlled_joint_indices): + joint_name = pink_config._all_joint_names[idx] + assert joint_name in pink_config._controlled_joint_names + + def test_full_model_integrity(self, pink_config): + """Test that the full model maintains integrity.""" + # Check that full model has all joints + assert pink_config.full_model.nq > 0 + assert len(pink_config.full_model.names) > 1 # More than just "universe" + + def test_controlled_model_integrity(self, pink_config): + """Test that the controlled model maintains integrity.""" + # Check that controlled model has correct number of joints + assert pink_config.controlled_model.nq == len(pink_config._controlled_joint_names) + + def test_configuration_vector_consistency(self, pink_config): + """Test that configuration vectors are consistent between full and controlled models.""" + # Check that controlled_q is a subset of full_q + controlled_indices = pink_config._controlled_joint_indices + for i, idx in enumerate(controlled_indices): + assert np.isclose(pink_config.controlled_q[i], pink_config.full_q[idx]) + + def test_error_handling_invalid_urdf(self, mesh_path, controlled_joint_names): + """Test error handling with invalid URDF path.""" + with pytest.raises(Exception): # Should raise some exception for invalid URDF + PinkKinematicsConfiguration( + urdf_path="nonexistent.urdf", + mesh_path=mesh_path, + controlled_joint_names=controlled_joint_names, + ) + + def test_error_handling_invalid_joint_names(self, urdf_path, mesh_path): + """Test error handling with 
invalid joint names.""" + invalid_joint_names = ["nonexistent_joint"] + + # This should not raise an error, but the controlled model should have 0 joints + pink_config = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=invalid_joint_names, + ) + + assert pink_config.controlled_model.nq == 0 + assert len(pink_config.controlled_q) == 0 + + def test_undercontrolled_kinematics_model(self, urdf_path, mesh_path): + """Test that the fixed joint to world is properly handled.""" + + test_model = PinkKinematicsConfiguration( + urdf_path=str(urdf_path), + mesh_path=mesh_path, + controlled_joint_names=["joint_1"], + copy_data=True, + forward_kinematics=True, + ) + # Check that the controlled model only includes the controlled revolute joint + assert "joint_1" in test_model.controlled_joint_names_pinocchio_order + assert "joint_2" not in test_model.controlled_joint_names_pinocchio_order + assert len(test_model.controlled_joint_names_pinocchio_order) == 1 # Only one of the two revolute joints + + # Check that the full configuration has more elements than controlled + assert len(test_model.full_q) > len(test_model.controlled_q) + assert len(test_model.full_q) == len(test_model.all_joint_names_pinocchio_order) + assert len(test_model.controlled_q) == len(test_model.controlled_joint_names_pinocchio_order) diff --git a/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf b/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf new file mode 100644 index 00000000000..cb1a305c50d --- /dev/null +++ b/source/isaaclab/test/controllers/urdfs/test_urdf_two_link_robot.urdf @@ -0,0 +1,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/source/isaaclab_assets/isaaclab_assets/robots/fourier.py b/source/isaaclab_assets/isaaclab_assets/robots/fourier.py new file mode 100644 index 00000000000..a47678efff6 --- /dev/null +++ b/source/isaaclab_assets/isaaclab_assets/robots/fourier.py @@ -0,0 +1,170 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Configuration for the Fourier Robots. + +The following configuration parameters are available: + +* :obj:`GR1T2_CFG`: The GR1T2 humanoid. +* :obj:`GR1T2_HIGH_PD_CFG`: The GR1T2 humanoid configured with high PD gains on upper body joints for pick-place manipulation tasks.
+ +Reference: https://www.fftai.com/products-gr1 +""" + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import ArticulationCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + +## +# Configuration +## + + +GR1T2_CFG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=( + f"{ISAAC_NUCLEUS_DIR}/Robots/FourierIntelligence/GR-1/GR1T2_fourier_hand_6dof/GR1T2_fourier_hand_6dof.usd" + ), + activate_contact_sensors=False, + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=False + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={".*": 0.0}, + joint_vel={".*": 0.0}, + ), + actuators={ + "head": ImplicitActuatorCfg( + joint_names_expr=[ + "head_.*", + ], + effort_limit=None, + velocity_limit=None, + stiffness=None, + damping=None, + ), + "trunk": ImplicitActuatorCfg( + joint_names_expr=[ + "waist_.*", + ], + effort_limit=None, + velocity_limit=None, + stiffness=None, + damping=None, + ), + "legs": ImplicitActuatorCfg( + joint_names_expr=[ + ".*_hip_.*", + ".*_knee_.*", + ".*_ankle_.*", + ], + effort_limit=None, + velocity_limit=None, + stiffness=None, + damping=None, + ), + "right-arm": ImplicitActuatorCfg( + joint_names_expr=[ + "right_shoulder_.*", + "right_elbow_.*", + "right_wrist_.*", + ], + effort_limit=torch.inf, + velocity_limit=torch.inf, + stiffness=None, + damping=None, + armature=0.0, + ), + "left-arm": ImplicitActuatorCfg( + joint_names_expr=[ + "left_shoulder_.*", + "left_elbow_.*", + "left_wrist_.*", + ], + effort_limit=torch.inf, + velocity_limit=torch.inf, + stiffness=None, + damping=None, + armature=0.0, + ), + "right-hand": ImplicitActuatorCfg( + joint_names_expr=[ + "R_.*", + ], + effort_limit=None, + velocity_limit=None, + stiffness=None, + damping=None, + ), + "left-hand": ImplicitActuatorCfg( + joint_names_expr=[ + "L_.*", + ], + effort_limit=None, + velocity_limit=None, + stiffness=None, + damping=None, + ), + }, +) +"""Configuration for the GR1T2 Humanoid robot.""" + + +GR1T2_HIGH_PD_CFG = GR1T2_CFG.replace( + actuators={ + "trunk": ImplicitActuatorCfg( + joint_names_expr=["waist_.*"], + effort_limit=None, + velocity_limit=None, + stiffness=4400, + damping=40.0, + armature=0.1, + ), + "right-arm": ImplicitActuatorCfg( + joint_names_expr=["right_shoulder_.*", "right_elbow_.*", "right_wrist_.*"], + stiffness=4400.0, + damping=40.0, + armature=0.1, + ), + "left-arm": ImplicitActuatorCfg( + joint_names_expr=["left_shoulder_.*", "left_elbow_.*", "left_wrist_.*"], + stiffness=4400.0, + damping=40.0, + armature=0.1, + ), + "right-hand": ImplicitActuatorCfg( + joint_names_expr=["R_.*"], + stiffness=None, + damping=None, + ), + "left-hand": ImplicitActuatorCfg( + joint_names_expr=["L_.*"], + stiffness=None, + damping=None, + ), + }, +)
+"""Configuration for the GR1T2 Humanoid robot configured for with high PD gains for pick-place manipulation tasks.""" diff --git a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py index e835dcefc85..c49731a6530 100644 --- a/source/isaaclab_assets/isaaclab_assets/robots/unitree.py +++ b/source/isaaclab_assets/isaaclab_assets/robots/unitree.py @@ -337,6 +337,34 @@ """Configuration for the Unitree G1 Humanoid robot with all 29 degrees of freedom + 7 DOF per hand.""" +ARMATURE_5020 = 0.003610 # kg-m^2 +ARMATURE_7520_14 = 0.01018 # kg-m^2 +ARMATURE_7520_22 = 0.02510 # kg-m^2 +ARMATURE_4010 = 0.00425 # kg-m^2 + +VEL_LIMIT_5020 = 37.0 # rad/s +VEL_LIMIT_7520_14 = 32.0 # rad/s +VEL_LIMIT_7520_22 = 20.0 # rad/s +VEL_LIMIT_4010 = 22.0 # rad/s + +EFFORT_LIMIT_5020 = 25.0 # Nm +EFFORT_LIMIT_7520_14 = 88.0 # Nm +EFFORT_LIMIT_7520_22 = 139.0 # Nm +EFFORT_LIMIT_4010 = 5.0 # Nm + +NATURAL_FREQ = 50 * 2.0 * 3.1415926535 # 10Hz +DAMPING_RATIO = 2.0 + +STIFFNESS_5020 = ARMATURE_5020 * NATURAL_FREQ**2 +STIFFNESS_7520_14 = ARMATURE_7520_14 * NATURAL_FREQ**2 +STIFFNESS_7520_22 = ARMATURE_7520_22 * NATURAL_FREQ**2 +STIFFNESS_4010 = ARMATURE_4010 * NATURAL_FREQ**2 + +DAMPING_5020 = 2.0 * DAMPING_RATIO * ARMATURE_5020 * NATURAL_FREQ +DAMPING_7520_14 = 2.0 * DAMPING_RATIO * ARMATURE_7520_14 * NATURAL_FREQ +DAMPING_7520_22 = 2.0 * DAMPING_RATIO * ARMATURE_7520_22 * NATURAL_FREQ +DAMPING_4010 = 2.0 * DAMPING_RATIO * ARMATURE_4010 * NATURAL_FREQ + G1_29_DOF_CFG = ArticulationCfg( spawn=sim_utils.UsdFileCfg( usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Unitree/G1/g1.usd", @@ -349,13 +377,22 @@ ), ), soft_joint_pos_limit_factor=0.9, + # Gravity compensation for upper body (arms and hands experience reduced gravity) + gravity_compensation=[ + ".*shoulder.*", # Shoulder links + ".*elbow.*", # Elbow links + ".*wrist.*", # Wrist links + ".*hand.*", # Hand links + ".*torso.*", # Torso links + ], + gravity_compensation_root_link_index="pelvis", init_state=ArticulationCfg.InitialStateCfg( - pos=(0.0, 0.0, 0.76), + pos=(0.0, 0.0, 0.75), + rot=(0, 0, 0.7071, 0.7071), joint_pos={ ".*_hip_pitch_joint": -0.10, ".*_knee_joint": 0.30, ".*_ankle_pitch_joint": -0.20, - ".*_wrist_.*_joint": 0.0, }, joint_vel={".*": 0.0}, ), @@ -386,32 +423,65 @@ ".*_knee_joint": 200.0, }, damping={ - ".*_hip_yaw_joint": 3.5, - ".*_hip_roll_joint": 3.5, - ".*_hip_pitch_joint": 3.5, + ".*_hip_yaw_joint": 2.5, + ".*_hip_roll_joint": 2.5, + ".*_hip_pitch_joint": 2.5, ".*_knee_joint": 5.0, }, armature={ - ".*_hip_.*": 0.01, - ".*_knee_joint": 0.01, + ".*_hip_.*": 0.03, + ".*_knee_joint": 0.03, }, ), "feet": ImplicitActuatorCfg( - effort_limit=50, joint_names_expr=[".*_ankle_pitch_joint", ".*_ankle_roll_joint"], - stiffness=20.0, - damping=1.5, - armature=0.01, + stiffness={ + ".*_ankle_pitch_joint": 20.0, + ".*_ankle_roll_joint": 20.0, + }, + damping={ + ".*_ankle_pitch_joint": 0.2, + ".*_ankle_roll_joint": 0.1, + }, + effort_limit={ + ".*_ankle_pitch_joint": 50.0, + ".*_ankle_roll_joint": 50.0, + }, + velocity_limit={ + ".*_ankle_pitch_joint": 37.0, + ".*_ankle_roll_joint": 37.0, + }, + armature=0.03, ), "waist": ImplicitActuatorCfg( joint_names_expr=[ "waist_.*_joint", ], - effort_limit=80, - velocity_limit=32.0, - stiffness=300.0, - damping=6.0, - armature=0.01, + effort_limit={ + "waist_yaw_joint": EFFORT_LIMIT_7520_14, + "waist_pitch_joint": EFFORT_LIMIT_5020, + "waist_roll_joint": EFFORT_LIMIT_5020, + }, + velocity_limit={ + "waist_yaw_joint": VEL_LIMIT_7520_14, + "waist_pitch_joint": 
VEL_LIMIT_5020, + "waist_roll_joint": VEL_LIMIT_5020, + }, + stiffness={ + "waist_yaw_joint": STIFFNESS_7520_14, + "waist_pitch_joint": STIFFNESS_5020, + "waist_roll_joint": STIFFNESS_5020, + }, + damping={ + "waist_yaw_joint": DAMPING_7520_14, + "waist_pitch_joint": DAMPING_5020, + "waist_roll_joint": DAMPING_5020, + }, + armature={ + "waist_yaw_joint": ARMATURE_7520_14, + "waist_pitch_joint": ARMATURE_5020, + "waist_roll_joint": ARMATURE_5020, + }, ), "arms": ImplicitActuatorCfg( joint_names_expr=[ @@ -420,32 +490,25 @@ ".*_shoulder_yaw_joint", ".*_elbow_joint", ".*_wrist_.*_joint", + ], + effort_limit=EFFORT_LIMIT_5020, + velocity_limit=VEL_LIMIT_5020, + # TODO: These high gains are needed to achieve the <3mm accuracy test for Pink IK test. + # Ideally, we should be able to use much lower gains, but the gravity compensation needs to + # first be improved. + stiffness=1500, + damping=10, + armature=ARMATURE_5020, + ), + "hands": ImplicitActuatorCfg( + joint_names_expr=[ ".*_hand_.*", ], effort_limit=300, - velocity_limit=100.0, - stiffness={ - ".*_shoulder_pitch_joint": 40.0, - ".*_shoulder_roll_joint": 40.0, - ".*_shoulder_yaw_joint": 40.0, - ".*_elbow_joint": 40.0, - ".*_wrist_.*_joint": 20.0, - ".*_hand_.*": 10.0, - }, - damping={ - ".*_shoulder_pitch_joint": 2.0, - ".*_shoulder_roll_joint": 2.0, - ".*_shoulder_yaw_joint": 2.0, - ".*_elbow_joint": 10.0, - ".*_wrist_.*_joint": 1.5, - ".*_hand_.*": 1.0, - }, - armature={ - ".*_shoulder_.*": 0.03, - ".*_elbow_.*": 0.03, - ".*_wrist_.*_joint": 0.03, - ".*_hand_.*": 0.03, - }, + velocity_limit=100, + stiffness=20, + damping=2, + armature=0.03, ), }, ) diff --git a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py index fff003b43b5..49331ca9cb1 100644 --- a/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py +++ b/source/isaaclab_newton/isaaclab_newton/assets/articulation/articulation.py @@ -20,6 +20,8 @@ from isaaclab_newton.assets.articulation.articulation_data import ArticulationData from isaaclab_newton.assets.utils.shared import find_bodies, find_joints from isaaclab_newton.kernels import ( + apply_accumulated_root_force, + apply_gravity_compensation_force, project_link_velocity_to_com_frame_masked_root, split_state_to_pose_and_velocity, transform_CoM_pose_to_link_frame_masked_root, @@ -44,6 +46,7 @@ update_array1D_with_value_masked, update_array2D_with_array1D_indexed, update_array2D_with_array2D_masked, + update_array2D_with_value_indexed, update_array2D_with_value_masked, ) from isaaclab.utils.warp.utils import ( @@ -260,6 +263,8 @@ def write_data_to_sim(self): This ensures that the external wrench is applied at every simulation step. """ # Wrenches are automatically applied by set_external_force_and_torque. + # Apply gravity compensation forces if enabled + self._apply_gravity_compensation() # apply actuator models. Actuator models automatically write the joint efforts into the simulation. 
self._apply_actuator_model() # Write the actuator targets into the simulation @@ -1818,6 +1823,7 @@ def _initialize_impl(self): self._process_cfg() self._process_actuators_cfg() self._process_tendons() + self._process_gravity_compensation() # validate configuration self._validate_cfg() # update the robot data @@ -1840,6 +1846,13 @@ def _create_buffers(self, *args, **kwargs): self.cfg.soft_joint_pos_limit_factor, ], ) + # Create gravity compensation buffer (initialized to 0 = no compensation) + self._gravity_compensation_factor = wp.zeros( + (self.num_instances, self.num_bodies), dtype=wp.float32, device=self.device + ) + # Accumulator for root link compensation forces (shape: num_envs x 3 for x, y, z components) + self._root_comp_force = wp.zeros((self.num_instances, 3), dtype=wp.float32, device=self.device) + self._has_gravity_compensation = False def _process_cfg(self): """Post processing of configuration parameters.""" @@ -2019,6 +2032,54 @@ def _process_tendons(self): if self.num_fixed_tendons > 0: raise NotImplementedError("Tendons are not implemented yet") + def _process_gravity_compensation(self): + """Process and apply gravity compensation configuration. + + This method reads the gravity compensation configuration from the articulation config + and initializes a buffer of gravity compensation wrenches for all bodies. + + Gravity compensation is applied through body_f (external wrench) by adding the opposite + of the gravitational force scaled by the compensation factor during write_data_to_sim. + """ + # Skip if no gravity compensation is configured + if self.cfg.gravity_compensation is None or self.cfg.gravity_compensation_root_link_index is None: + return + + # Resolve body names from the configuration (list of regex patterns) + indices_list, names_list = string_utils.resolve_matching_names(self.cfg.gravity_compensation, self.body_names) + + # Resolve body names and values of root_link + root_link_indices, _ = string_utils.resolve_matching_names( + self.cfg.gravity_compensation_root_link_index, self.body_names + ) + self._root_link_index = root_link_indices[0] + + if len(indices_list) == 0: + warnings.warn( + f"No bodies matched the gravity compensation patterns: {self.cfg.gravity_compensation}. " + f"Available body names: {self.body_names}", + UserWarning, + ) + return + + # Log the gravity compensation settings + logger.info(f"Applying gravity compensation for articulation '{self.cfg.prim_path}': {names_list}") + + # Fill in the values for matched bodies + wp.launch( + update_array2D_with_value_indexed, + dim=(self.num_instances, len(indices_list)), + device=self.device, + inputs=[ + 1.0, + self._gravity_compensation_factor, + None, + wp.array(indices_list, dtype=wp.int32, device=self.device), + ], + ) + + self._has_gravity_compensation = True + def _apply_actuator_model(self): """Processes joint commands for the articulation by forwarding them to the actuators. @@ -2032,6 +2093,47 @@ def _apply_actuator_model(self): # if hasattr(actuator, "gear_ratio"): # self._data.gear_ratio[:, actuator.joint_indices] = actuator.gear_ratio + def _apply_gravity_compensation(self): + """Apply gravity compensation to the articulation. + + This method uses a two-kernel approach to avoid race conditions: + 1. First kernel accumulates compensation forces per-body and atomically accumulates + the negative compensation to the root link accumulator. + 2. Second kernel applies the accumulated force to the root link. 
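+ Because the root link receives the negative of the summed per-body compensation forces, the net external force added to the articulation is zero.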
+ """ + if self._has_gravity_compensation: + gravity = NewtonManager._gravity_vector # Already a tuple (x, y, z) + + # Zero the accumulator before use + self._root_comp_force.zero_() + + # First kernel: apply per-body compensation and atomically accumulate root link force + wp.launch( + apply_gravity_compensation_force, + dim=(self.num_instances, self.num_bodies), + inputs=[ + self._data.body_mass, + self._gravity_compensation_factor, + wp.vec3f(gravity[0], gravity[1], gravity[2]), + self._data._sim_bind_body_external_wrench, + self._root_link_index, + self._root_comp_force, + ], + device=self.device, + ) + + # Second kernel: apply the accumulated compensation force to the root link + wp.launch( + apply_accumulated_root_force, + dim=self.num_instances, + inputs=[ + self._root_comp_force, + self._data._sim_bind_body_external_wrench, + self._root_link_index, + ], + device=self.device, + ) + """ Internal helpers -- Debugging. """ diff --git a/source/isaaclab_newton/isaaclab_newton/kernels/other_kernels.py b/source/isaaclab_newton/isaaclab_newton/kernels/other_kernels.py index 669b24b759e..9d9b1d2677b 100644 --- a/source/isaaclab_newton/isaaclab_newton/kernels/other_kernels.py +++ b/source/isaaclab_newton/isaaclab_newton/kernels/other_kernels.py @@ -91,3 +91,68 @@ def populate_empty_array( ): index = wp.tid() output_array[indices[index]] = input_array[index] + + +@wp.kernel +def apply_gravity_compensation_force( + body_mass: wp.array2d(dtype=wp.float32), + gravcomp: wp.array2d(dtype=wp.float32), + gravity: wp.vec3f, + body_f: wp.array2d(dtype=wp.spatial_vectorf), + root_link_index: wp.int32, + root_comp_force: wp.array2d(dtype=wp.float32), +): + """Apply gravity compensation force to body_f. + + The compensation force is: F = m * (-g) * gravcomp_factor + where m is body mass, g is gravity vector, and gravcomp_factor is the compensation factor. + The compensated weight is accumulated atomically into root_comp_force for later application to the root link. + + Args: + root_comp_force: Accumulator array of shape (num_envs, 3) for x, y, z components. + Must be zeroed before calling this kernel. + """ + env_index, body_index = wp.tid() + mass = body_mass[env_index, body_index] + comp_factor = gravcomp[env_index, body_index] + # Calculate the opposite of gravity force scaled by compensation factor + comp_force = wp.vec3f( + -gravity[0] * mass * comp_factor, -gravity[1] * mass * comp_factor, -gravity[2] * mass * comp_factor + ) + # Add to existing wrench (keep torque, add to force) + current_wrench = body_f[env_index, body_index] + current_force = wp.spatial_top(current_wrench) + new_force = wp.vec3f( + current_force[0] + comp_force[0], current_force[1] + comp_force[1], current_force[2] + comp_force[2] + ) + body_f[env_index, body_index] = wp.spatial_vector(new_force, wp.spatial_bottom(current_wrench), wp.float32) + + # Atomically accumulate the negative compensation force for the root link + wp.atomic_add(root_comp_force, env_index, 0, -comp_force[0]) + wp.atomic_add(root_comp_force, env_index, 1, -comp_force[1]) + wp.atomic_add(root_comp_force, env_index, 2, -comp_force[2]) + + +@wp.kernel +def apply_accumulated_root_force( + root_comp_force: wp.array2d(dtype=wp.float32), + body_f: wp.array2d(dtype=wp.spatial_vectorf), + root_link_index: wp.int32, +): + """Apply the accumulated compensation force to the root link. + + This kernel should be called after apply_gravity_compensation_force to apply + the atomically accumulated forces to the root link. 
+ + Args: + root_comp_force: Accumulator array of shape (num_envs, 3) populated by + apply_gravity_compensation_force. + """ + env_index = wp.tid() + current_wrench = body_f[env_index, root_link_index] + current_force = wp.spatial_top(current_wrench) + accumulated = wp.vec3f(root_comp_force[env_index, 0], root_comp_force[env_index, 1], root_comp_force[env_index, 2]) + new_force = wp.vec3f( + current_force[0] + accumulated[0], current_force[1] + accumulated[1], current_force[2] + accumulated[2] + ) + body_f[env_index, root_link_index] = wp.spatial_vector(new_force, wp.spatial_bottom(current_wrench), wp.float32) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py new file mode 100644 index 00000000000..72b01368b49 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""This sub-module contains the functions that are specific to the locomanipulation environments.""" + +from .tracking import * # noqa diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py new file mode 100644 index 00000000000..ff7e927b05e --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/__init__.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""This sub-module contains the functions that are specific to the locomanipulation environments.""" + +import gymnasium as gym +import os + +from . 
import agents, fixed_base_upper_body_ik_g1_env_cfg, locomanipulation_g1_env_cfg + +gym.register( + id="Isaac-PickPlace-Locomanipulation-G1-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": locomanipulation_g1_env_cfg.LocomanipulationG1EnvCfg, + "robomimic_bc_cfg_entry_point": os.path.join(agents.__path__[0], "robomimic/bc_rnn_low_dim.json"), + }, + disable_env_checker=True, +) + +gym.register( + id="Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": fixed_base_upper_body_ik_g1_env_cfg.FixedBaseUpperBodyIKG1EnvCfg, + }, + disable_env_checker=True, +) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json new file mode 100644 index 00000000000..c1dce5f832c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json @@ -0,0 +1,117 @@ +{ + "algo_name": "bc", + "experiment": { + "name": "bc_rnn_low_dim_g1", + "validate": false, + "logging": { + "terminal_output_to_txt": true, + "log_tb": true + }, + "save": { + "enabled": true, + "every_n_seconds": null, + "every_n_epochs": 100, + "epochs": [], + "on_best_validation": false, + "on_best_rollout_return": false, + "on_best_rollout_success_rate": true + }, + "epoch_every_n_steps": 100, + "env": null, + "additional_envs": null, + "render": false, + "render_video": false, + "rollout": { + "enabled": false + } + }, + "train": { + "data": null, + "num_data_workers": 4, + "hdf5_cache_mode": "all", + "hdf5_use_swmr": true, + "hdf5_normalize_obs": false, + "hdf5_filter_key": null, + "hdf5_validation_filter_key": null, + "seq_length": 10, + "dataset_keys": [ + "actions" + ], + "goal_mode": null, + "cuda": true, + "batch_size": 100, + "num_epochs": 2000, + "seed": 101 + }, + "algo": { + "optim_params": { + "policy": { + "optimizer_type": "adam", + "learning_rate": { + "initial": 0.001, + "decay_factor": 0.1, + "epoch_schedule": [], + "scheduler_type": "multistep" + }, + "regularization": { + "L2": 0.0 + } + } + }, + "loss": { + "l2_weight": 1.0, + "l1_weight": 0.0, + "cos_weight": 0.0 + }, + "actor_layer_dims": [], + "gmm": { + "enabled": false, + "num_modes": 5, + "min_std": 0.0001, + "std_activation": "softplus", + "low_noise_eval": true + }, + "rnn": { + "enabled": true, + "horizon": 10, + "hidden_dim": 400, + "rnn_type": "LSTM", + "num_layers": 2, + "open_loop": false, + "kwargs": { + "bidirectional": false + } + }, + "transformer": { + "enabled": false, + "context_length": 10, + "embed_dim": 512, + "num_layers": 6, + "num_heads": 8, + "emb_dropout": 0.1, + "attn_dropout": 0.1, + "block_output_dropout": 0.1, + "sinusoidal_embedding": false, + "activation": "gelu", + "supervise_all_steps": false, + "nn_parameter_for_timesteps": true + } + }, + "observation": { + "modalities": { + "obs": { + "low_dim": [ + "left_eef_pos", + "left_eef_quat", + "right_eef_pos", + "right_eef_quat", + "hand_joint_state", + "object" + ], + "rgb": [], + "depth": [], + "scan": [] + } + } + } +} diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py new file mode 100644 index 00000000000..ab28e6dff67 --- /dev/null +++ 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/action_cfg.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from dataclasses import MISSING + +from isaaclab.managers.action_manager import ActionTerm, ActionTermCfg +from isaaclab.utils import configclass + +from ..mdp.actions import AgileBasedLowerBodyAction + + +@configclass +class AgileBasedLowerBodyActionCfg(ActionTermCfg): + """Configuration for the lower body action term that is based on Agile lower body RL policy.""" + + class_type: type[ActionTerm] = AgileBasedLowerBodyAction + """The class type for the lower body action term.""" + + joint_names: list[str] = MISSING + """The names of the joints to control.""" + + obs_group_name: str = MISSING + """The name of the observation group to use.""" + + policy_path: str = MISSING + """The path to the policy model.""" + + policy_output_offset: float = 0.0 + """Offsets the output of the policy.""" + + policy_output_scale: float = 1.0 + """Scales the output of the policy.""" + + policy_joint_order: list[str] | None = None + """The order of joints in the policy output. If None, uses the articulation joint order. + + When specified, the policy output is assumed to be in this order and will be remapped + to the articulation's joint order before applying. This should match the joint ordering + used during policy training.""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py new file mode 100644 index 00000000000..38e9d1c9041 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/agile_locomotion_observation_cfg.py @@ -0,0 +1,156 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab.envs import mdp +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.mdp import observations as locomanip_obs + + +@configclass +class AgileTeacherPolicyObservationsCfg(ObsGroup): + """Observation specifications for the Agile lower body policy. + + Note: This configuration defines only part of the observation input to the Agile lower body policy. + The lower body command portion is appended to the observation tensor in the action term, as that + is where the environment has access to those commands. 
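+
+    For reference, the action term composes the final policy input roughly as in the
+    sketch below (illustrative; ``H`` stands for this group's configured history length):
+
+    .. code-block:: python
+
+        # command [vx, vy, wz, hip_height] is repeated once per history step,
+        # then prepended to the flattened observation tensor of this group
+        repeated = command.unsqueeze(1).repeat(1, H, 1).reshape(command.shape[0], -1)
+        policy_input = torch.cat([repeated, obs_tensor], dim=-1)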
+ """ + + base_lin_vel = ObsTerm( + func=mdp.base_lin_vel, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + + base_ang_vel = ObsTerm( + func=mdp.base_ang_vel, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + + projected_gravity = ObsTerm( + func=mdp.projected_gravity, + scale=1.0, + ) + + joint_pos = ObsTerm( + func=mdp.joint_pos_rel, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + # Agile policy was trained in Isaac Lab + PhysX which has different joint ordering than MuJoCo + # this list does the re-mapping of joint orders + joint_names=[ + "left_hip_pitch_joint", + "right_hip_pitch_joint", + "waist_yaw_joint", + "left_hip_roll_joint", + "right_hip_roll_joint", + "waist_roll_joint", + "left_hip_yaw_joint", + "right_hip_yaw_joint", + "waist_pitch_joint", + "left_knee_joint", + "right_knee_joint", + "left_shoulder_pitch_joint", + "right_shoulder_pitch_joint", + "left_ankle_pitch_joint", + "right_ankle_pitch_joint", + "left_shoulder_roll_joint", + "right_shoulder_roll_joint", + "left_ankle_roll_joint", + "right_ankle_roll_joint", + "left_shoulder_yaw_joint", + "right_shoulder_yaw_joint", + "left_elbow_joint", + "right_elbow_joint", + "left_wrist_roll_joint", + "right_wrist_roll_joint", + "left_wrist_pitch_joint", + "right_wrist_pitch_joint", + "left_wrist_yaw_joint", + "right_wrist_yaw_joint", + ], + preserve_order=True, + ), + }, + ) + + joint_vel = ObsTerm( + func=mdp.joint_vel_rel, + scale=0.1, + params={ + "asset_cfg": SceneEntityCfg( + "robot", + # Agile policy was trained in Isaac Lab + PhysX which has different joint ordering than MuJoCo + # this list does the re-mapping of joint orders + joint_names=[ + "left_hip_pitch_joint", + "right_hip_pitch_joint", + "waist_yaw_joint", + "left_hip_roll_joint", + "right_hip_roll_joint", + "waist_roll_joint", + "left_hip_yaw_joint", + "right_hip_yaw_joint", + "waist_pitch_joint", + "left_knee_joint", + "right_knee_joint", + "left_shoulder_pitch_joint", + "right_shoulder_pitch_joint", + "left_ankle_pitch_joint", + "right_ankle_pitch_joint", + "left_shoulder_roll_joint", + "right_shoulder_roll_joint", + "left_ankle_roll_joint", + "right_ankle_roll_joint", + "left_shoulder_yaw_joint", + "right_shoulder_yaw_joint", + "left_elbow_joint", + "right_elbow_joint", + "left_wrist_roll_joint", + "right_wrist_roll_joint", + "left_wrist_pitch_joint", + "right_wrist_pitch_joint", + "left_wrist_yaw_joint", + "right_wrist_yaw_joint", + ], + preserve_order=True, + ), + }, + ) + + actions = ObsTerm( + func=locomanip_obs.last_action_with_remap, + scale=1.0, + params={ + "action_name": "lower_body_joint_pos", + "asset_cfg": SceneEntityCfg( + "robot", + # Agile policy was trained in Isaac Lab + PhysX which has different joint ordering than MuJoCo + # this list does the re-mapping of joint orders + joint_names=[ + "left_hip_pitch_joint", + "right_hip_pitch_joint", + "left_hip_roll_joint", + "right_hip_roll_joint", + "left_hip_yaw_joint", + "right_hip_yaw_joint", + "left_knee_joint", + "right_knee_joint", + "left_ankle_pitch_joint", + "right_ankle_pitch_joint", + "left_ankle_roll_joint", + "right_ankle_roll_joint", + ], + preserve_order=True, + ), + }, + ) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = True diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py new file mode 100644 index 00000000000..488d22c137b --- /dev/null +++ 
b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/configs/pink_controller_cfg.py
@@ -0,0 +1,126 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+"""Configuration for the Pink IK controller.
+
+This module provides configurations for humanoid robot pink IK controllers,
+including both fixed base and mobile configurations for upper body manipulation.
+"""
+
+from isaaclab.controllers.pink_ik.local_frame_task import LocalFrameTask
+from isaaclab.controllers.pink_ik.null_space_posture_task import NullSpacePostureTask
+from isaaclab.controllers.pink_ik.pink_ik_cfg import PinkIKControllerCfg
+from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg
+
+##
+# Pink IK Controller Configuration for G1
+##
+
+G1_UPPER_BODY_IK_CONTROLLER_CFG = PinkIKControllerCfg(
+    articulation_name="robot",
+    base_link_name="pelvis",
+    num_hand_joints=14,
+    show_ik_warnings=True,
+    fail_on_joint_limit_violation=False,
+    variable_input_tasks=[
+        LocalFrameTask(
+            "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link",
+            base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis",
+            position_cost=8.0,  # [cost] / [m]
+            orientation_cost=2.0,  # [cost] / [rad]
+            lm_damping=10,  # damping on the solver to smooth out step jumps
+            gain=0.5,
+        ),
+        LocalFrameTask(
+            "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link",
+            base_link_frame_name="g1_29dof_with_hand_rev_1_0_pelvis",
+            position_cost=8.0,  # [cost] / [m]
+            orientation_cost=2.0,  # [cost] / [rad]
+            lm_damping=10,  # damping on the solver to smooth out step jumps
+            gain=0.5,
+        ),
+        NullSpacePostureTask(
+            cost=0.5,
+            lm_damping=1,
+            controlled_frames=[
+                "g1_29dof_with_hand_rev_1_0_left_wrist_yaw_link",
+                "g1_29dof_with_hand_rev_1_0_right_wrist_yaw_link",
+            ],
+            controlled_joints=[
+                "left_shoulder_pitch_joint",
+                "left_shoulder_roll_joint",
+                "left_shoulder_yaw_joint",
+                "right_shoulder_pitch_joint",
+                "right_shoulder_roll_joint",
+                "right_shoulder_yaw_joint",
+                "waist_yaw_joint",
+                "waist_pitch_joint",
+                "waist_roll_joint",
+            ],
+            gain=0.3,
+        ),
+    ],
+    fixed_input_tasks=[],
+)
+"""Base configuration for the G1 pink IK controller.
+
+This configuration sets up the pink IK controller for the G1 humanoid robot with
+left and right wrist control tasks. The controller is designed for upper body
+manipulation tasks.
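+
+A variant with different task weights can be derived by deep-copying this config
+(illustrative sketch; assumes ``copy.deepcopy`` is safe here, since the task
+entries are stateful objects rather than plain config fields):
+
+.. code-block:: python
+
+    import copy
+
+    softer_ik_cfg = copy.deepcopy(G1_UPPER_BODY_IK_CONTROLLER_CFG)
+    # halve the left-wrist position tracking weight
+    softer_ik_cfg.variable_input_tasks[0].set_position_cost(4.0)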
+""" + + +## +# Pink IK Action Configuration for G1 +## + +G1_UPPER_BODY_IK_ACTION_CFG = PinkInverseKinematicsActionCfg( + pink_controlled_joint_names=[ + ".*_shoulder_pitch_joint", + ".*_shoulder_roll_joint", + ".*_shoulder_yaw_joint", + ".*_elbow_joint", + ".*_wrist_pitch_joint", + ".*_wrist_roll_joint", + ".*_wrist_yaw_joint", + "waist_.*_joint", + ], + hand_joint_names=[ + "left_hand_index_0_joint", # Index finger proximal + "left_hand_middle_0_joint", # Middle finger proximal + "left_hand_thumb_0_joint", # Thumb base (yaw axis) + "right_hand_index_0_joint", # Index finger proximal + "right_hand_middle_0_joint", # Middle finger proximal + "right_hand_thumb_0_joint", # Thumb base (yaw axis) + "left_hand_index_1_joint", # Index finger distal + "left_hand_middle_1_joint", # Middle finger distal + "left_hand_thumb_1_joint", # Thumb middle (pitch axis) + "right_hand_index_1_joint", # Index finger distal + "right_hand_middle_1_joint", # Middle finger distal + "right_hand_thumb_1_joint", # Thumb middle (pitch axis) + "left_hand_thumb_2_joint", # Thumb tip + "right_hand_thumb_2_joint", # Thumb tip + ], + target_eef_link_names={ + "left_wrist": "left_wrist_yaw_link", + "right_wrist": "right_wrist_yaw_link", + }, + # the robot in the sim scene we are controlling + asset_name="robot", + # Configuration for the IK controller + # The frames names are the ones present in the URDF file + # The urdf has to be generated from the USD that is being used in the scene + controller=G1_UPPER_BODY_IK_CONTROLLER_CFG, +) +"""Base configuration for the G1 pink IK action. + +This configuration sets up the pink IK action for the G1 humanoid robot, +defining which joints are controlled by the IK solver and which are fixed. +The configuration includes: +- Upper body joints controlled by IK (shoulders, elbows, wrists) +- Fixed joints (pelvis, legs, hands) +- Hand joint names for additional control +- Reference to the pink IK controller configuration +""" diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py new file mode 100644 index 00000000000..ec2d7aeef5f --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/fixed_base_upper_body_ik_g1_env_cfg.py @@ -0,0 +1,209 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + + +from isaaclab_assets.robots.unitree import G1_29_DOF_CFG + +import isaaclab.envs.mdp as base_mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg +from isaaclab.sim._impl.solvers_cfg import MJWarpSolverCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp +from isaaclab_tasks.manager_based.manipulation.pick_place import mdp as manip_mdp + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.pink_controller_cfg import ( # isort: skip + G1_UPPER_BODY_IK_ACTION_CFG, +) + + +## +# Scene definition +## +@configclass +class FixedBaseUpperBodyIKG1SceneCfg(InteractiveSceneCfg): + """Scene configuration for fixed base upper body IK environment with G1 robot. + + This configuration sets up the G1 humanoid robot with fixed pelvis and legs, + allowing only arm manipulation while the base remains stationary. The robot is + controlled using upper body IK. + """ + + # Table + # packing_table = RigidObjectCfg( + # prim_path="{ENV_REGEX_NS}/PackingTable", + # init_state=RigidObjectCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[0.0, 0.0, 0.0, 1.0]), + # spawn=UsdFileCfg( + # # usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + # usd_path="/home/michalin/workspace/IsaacLab_Assets/props/Collected_packing_table/packing_table.usd", + # rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + # ), + # ) + + # Object as ArticulationCfg (Newton does not support RigidObjectCfg) + # object: ArticulationCfg = ArticulationCfg( + # prim_path="{ENV_REGEX_NS}/Object", + # spawn=sim_utils.UsdFileCfg( + # # usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", + # usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd", + # scale=(0.75, 0.75, 0.75), + # ), + # init_state=ArticulationCfg.InitialStateCfg(pos=(-0.35, 0.45, 0.6996), rot=(0.0, 0.0, 0.0, 1.0)), + # actuators={}, + # articulation_root_prim_path="", + # ) + + # Unitree G1 Humanoid robot - fixed base configuration + robot: ArticulationCfg = G1_29_DOF_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Ground plane + ground = AssetBaseCfg( + prim_path="/World/GroundPlane", + spawn=GroundPlaneCfg(), + ) + + # Lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + def __post_init__(self): + """Post initialization.""" + # Set the robot to fixed base + self.robot.spawn.articulation_props.fix_root_link = True + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + upper_body_ik = G1_UPPER_BODY_IK_ACTION_CFG + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP. 
+    This class is required by the environment configuration but is not used in this implementation.
+    """
+
+    @configclass
+    class PolicyCfg(ObsGroup):
+        """Observations for policy group with state values."""
+
+        actions = ObsTerm(func=manip_mdp.last_action)
+        robot_joint_pos = ObsTerm(
+            func=base_mdp.joint_pos,
+            params={"asset_cfg": SceneEntityCfg("robot")},
+        )
+        robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        # object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")})
+        # object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")})
+        robot_links_state = ObsTerm(func=manip_mdp.get_all_robot_link_state)
+
+        left_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "left_wrist_yaw_link"})
+        left_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "left_wrist_yaw_link"})
+        right_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "right_wrist_yaw_link"})
+        right_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "right_wrist_yaw_link"})
+
+        hand_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": [".*_hand.*"]})
+        head_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": []})
+
+        # object = ObsTerm(
+        #     func=manip_mdp.object_obs,
+        #     params={"left_eef_link_name": "left_wrist_yaw_link", "right_eef_link_name": "right_wrist_yaw_link"},
+        # )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=locomanip_mdp.time_out, time_out=True)
+
+    # object_dropping = DoneTerm(
+    #     func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
+    # )
+
+    # success = DoneTerm(func=manip_mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"})
+
+
+##
+# MDP settings
+##
+
+
+@configclass
+class FixedBaseUpperBodyIKG1EnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the G1 fixed base upper body IK environment.
+
+    This environment is designed for manipulation tasks where the G1 humanoid robot
+    has a fixed pelvis and legs, allowing only arm and hand movements for manipulation.
+    The robot is controlled using upper body IK.
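+
+    Typical usage via the ID registered in this package's ``__init__.py``
+    (illustrative sketch; assumes the usual ``parse_env_cfg`` workflow):
+
+    .. code-block:: python
+
+        import gymnasium as gym
+
+        from isaaclab_tasks.utils import parse_env_cfg
+
+        task = "Isaac-PickPlace-FixedBaseUpperBodyIK-G1-Abs-v0"
+        env = gym.make(task, cfg=parse_env_cfg(task))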
+ """ + + sim: SimulationCfg = SimulationCfg( + newton_cfg=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=50, + nconmax=50, + cone="elliptic", + ls_parallel=True, + impratio=100, + iterations=100, + ls_iterations=50, + integrator="implicit", + solver="newton", + ), + ) + ) + + # Scene settings + scene: FixedBaseUpperBodyIKG1SceneCfg = FixedBaseUpperBodyIKG1SceneCfg( + num_envs=1, env_spacing=2.5, replicate_physics=True + ) + # MDP settings + terminations: TerminationsCfg = TerminationsCfg() + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + + # Unused managers + commands = None + rewards = None + curriculum = None + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 + self.episode_length_s = 20.0 + # simulation settings + self.sim.dt = 1 / 200 # 200Hz + self.sim.render_interval = 2 + + # Set the URDF and mesh paths for the IK controller + urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" + + # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. + self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py new file mode 100644 index 00000000000..31a4f60b0fe --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/locomanipulation_g1_env_cfg.py @@ -0,0 +1,229 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from isaaclab_assets.robots.unitree import G1_29_DOF_CFG + +import isaaclab.envs.mdp as base_mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sim import SimulationCfg +from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg +from isaaclab.sim._impl.solvers_cfg import MJWarpSolverCfg +from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, retrieve_file_path + +from isaaclab_tasks.manager_based.locomanipulation.pick_place import mdp as locomanip_mdp +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.action_cfg import AgileBasedLowerBodyActionCfg +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.agile_locomotion_observation_cfg import ( + AgileTeacherPolicyObservationsCfg, +) +from isaaclab_tasks.manager_based.manipulation.pick_place import mdp as manip_mdp + +from isaaclab_tasks.manager_based.locomanipulation.pick_place.configs.pink_controller_cfg import ( # isort: skip + G1_UPPER_BODY_IK_ACTION_CFG, +) + + +## +# Scene definition +## +@configclass +class LocomanipulationG1SceneCfg(InteractiveSceneCfg): + """Scene configuration for locomanipulation environment with G1 robot. + + This configuration sets up the G1 humanoid robot for locomanipulation tasks, + allowing both locomotion and manipulation capabilities. The robot can move its + base and use its arms for manipulation tasks. 
+ """ + + # Table + # packing_table = AssetBaseCfg( + # prim_path="/World/envs/env_.*/PackingTable", + # init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, -0.3], rot=[1.0, 0.0, 0.0, 0.0]), + # spawn=UsdFileCfg( + # usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd", + # rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + # ), + # ) + + # object = RigidObjectCfg( + # prim_path="{ENV_REGEX_NS}/Object", + # init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.35, 0.45, 0.6996], rot=[1, 0, 0, 0]), + # spawn=UsdFileCfg( + # usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd", + # scale=(0.75, 0.75, 0.75), + # rigid_props=sim_utils.RigidBodyPropertiesCfg(), + # ), + # ) + + # Humanoid robot w/ arms higher + robot: ArticulationCfg = G1_29_DOF_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # Ground plane + ground = AssetBaseCfg( + prim_path="/World/GroundPlane", + spawn=GroundPlaneCfg(), + ) + + # Lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + upper_body_ik = G1_UPPER_BODY_IK_ACTION_CFG + + lower_body_joint_pos = AgileBasedLowerBodyActionCfg( + asset_name="robot", + joint_names=[ + ".*_hip_.*_joint", + ".*_knee_joint", + ".*_ankle_.*_joint", + ], + policy_output_scale=0.25, + obs_group_name="lower_body_policy", # need to be the same name as the on in ObservationCfg + policy_path=f"{ISAACLAB_NUCLEUS_DIR}/Policies/Agile/agile_locomotion.pt", + # Policy outputs lower body joint actions in this order (matching training order) + policy_joint_order=[ + "left_hip_pitch_joint", + "right_hip_pitch_joint", + "left_hip_roll_joint", + "right_hip_roll_joint", + "left_hip_yaw_joint", + "right_hip_yaw_joint", + "left_knee_joint", + "right_knee_joint", + "left_ankle_pitch_joint", + "right_ankle_pitch_joint", + "left_ankle_roll_joint", + "right_ankle_roll_joint", + ], + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP. 
+    This class is required by the environment configuration but is not used in this implementation.
+    """
+
+    @configclass
+    class PolicyCfg(ObsGroup):
+        """Observations for policy group with state values."""
+
+        actions = ObsTerm(func=manip_mdp.last_action)
+        robot_joint_pos = ObsTerm(
+            func=base_mdp.joint_pos,
+            params={"asset_cfg": SceneEntityCfg("robot")},
+        )
+        robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        # object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")})
+        # object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")})
+        robot_links_state = ObsTerm(func=manip_mdp.get_all_robot_link_state)
+
+        left_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "left_wrist_yaw_link"})
+        left_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "left_wrist_yaw_link"})
+        right_eef_pos = ObsTerm(func=manip_mdp.get_eef_pos, params={"link_name": "right_wrist_yaw_link"})
+        right_eef_quat = ObsTerm(func=manip_mdp.get_eef_quat, params={"link_name": "right_wrist_yaw_link"})
+
+        hand_joint_state = ObsTerm(func=manip_mdp.get_robot_joint_state, params={"joint_names": [".*_hand.*"]})
+
+        # object = ObsTerm(
+        #     func=manip_mdp.object_obs,
+        #     params={"left_eef_link_name": "left_wrist_yaw_link", "right_eef_link_name": "right_wrist_yaw_link"},
+        # )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+    lower_body_policy: AgileTeacherPolicyObservationsCfg = AgileTeacherPolicyObservationsCfg()
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=locomanip_mdp.time_out, time_out=True)
+
+    # object_dropping = DoneTerm(
+    #     func=base_mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
+    # )
+
+    # success = DoneTerm(func=manip_mdp.task_done_pick_place, params={"task_link_name": "right_wrist_yaw_link"})
+
+
+##
+# MDP settings
+##
+
+
+@configclass
+class LocomanipulationG1EnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the G1 locomanipulation environment.
+
+    This environment is designed for locomanipulation tasks where the G1 humanoid robot
+    can perform both locomotion and manipulation simultaneously. The robot can move its
+    base and use its arms for manipulation tasks, enabling complex mobile manipulation
+    behaviors.
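+
+    The action vector concatenates the upper-body IK targets (declared first in
+    :class:`ActionsCfg`) with the 4-dim lower body command ``[vx, vy, wz, hip_height]``
+    consumed by the Agile policy, so the last four entries drive locomotion. An
+    illustrative sketch (the command values, including the hip height, are placeholders):
+
+    .. code-block:: python
+
+        action = torch.zeros(env.action_space.shape)
+        action[..., -4:] = torch.tensor([0.5, 0.0, 0.0, 0.72])  # walk forward at 0.5 m/s
+        obs, rew, terminated, truncated, info = env.step(action)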
+ """ + + sim: SimulationCfg = SimulationCfg( + newton_cfg=NewtonCfg( + solver_cfg=MJWarpSolverCfg( + njmax=50, + nconmax=50, + cone="elliptic", + ls_parallel=True, + impratio=100, + iterations=100, + ls_iterations=50, + integrator="implicit", + solver="newton", + ), + ) + ) + + # Scene settings + scene: LocomanipulationG1SceneCfg = LocomanipulationG1SceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True) + # MDP settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + commands = None + terminations: TerminationsCfg = TerminationsCfg() + + # Unused managers + rewards = None + curriculum = None + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 + self.episode_length_s = 20.0 + # simulation settings + self.sim.dt = 1 / 200 # 200Hz + self.sim.render_interval = 2 + + # Set the URDF and mesh paths for the IK controller + urdf_omniverse_path = f"{ISAACLAB_NUCLEUS_DIR}/Controllers/LocomanipulationAssets/unitree_g1_kinematics_asset/g1_29dof_with_hand_only_kinematics.urdf" + + # Retrieve local paths for the URDF and mesh files. Will be cached for call after the first time. + self.actions.upper_body_ik.controller.urdf_path = retrieve_file_path(urdf_omniverse_path) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py new file mode 100644 index 00000000000..7e559309b5c --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + + +"""This sub-module contains the functions that are specific to the locomanipulation environments.""" + +from isaaclab.envs.mdp import * # noqa: F401, F403 + +from .actions import * # noqa: F401, F403 +from .observations import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py new file mode 100644 index 00000000000..ce80ca88d6d --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/actions.py @@ -0,0 +1,146 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from typing import TYPE_CHECKING
+
+import warp as wp
+
+from isaaclab.assets.articulation import Articulation
+from isaaclab.managers.action_manager import ActionTerm
+from isaaclab.utils.assets import retrieve_file_path
+from isaaclab.utils.io.torchscript import load_torchscript_model
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedEnv
+
+    from ..configs.action_cfg import AgileBasedLowerBodyActionCfg
+
+
+class AgileBasedLowerBodyAction(ActionTerm):
+    """Action term that is based on the Agile lower body RL policy."""
+
+    cfg: AgileBasedLowerBodyActionCfg
+    """The configuration of the action term."""
+
+    _asset: Articulation
+    """The articulation asset to which the action term is applied."""
+
+    def __init__(self, cfg: AgileBasedLowerBodyActionCfg, env: ManagerBasedEnv):
+        super().__init__(cfg, env)
+
+        # Save the observation config from cfg
+        self._observation_cfg = env.cfg.observations
+        self._obs_group_name = cfg.obs_group_name
+
+        # Retrieve and load the TorchScript locomotion policy
+        _temp_policy_path = retrieve_file_path(cfg.policy_path)
+        self._policy = load_torchscript_model(_temp_policy_path, device=env.device)
+        self._env = env
+
+        # Find joint ids for the lower body joints
+        # find_joints returns (joint_mask: wp.array, joint_names: list[str], joint_indices: list[int])
+        _, self._joint_names, self._joint_ids = self._asset.find_joints(self.cfg.joint_names)
+
+        # Build mapping from policy output order to articulation order if policy_joint_order is specified
+        self._policy_to_articulation_indices: torch.Tensor | None = None
+        if cfg.policy_joint_order is not None:
+            # Create a mapping from policy output index to articulation joint index
+            # policy_joint_order contains joint names in the order the policy outputs them
+            # self._joint_names contains joint names in articulation order (from find_joints)
+            policy_to_art_indices = []
+            for policy_joint_name in cfg.policy_joint_order:
+                if policy_joint_name in self._joint_names:
+                    art_idx = self._joint_names.index(policy_joint_name)
+                    policy_to_art_indices.append(art_idx)
+            self._policy_to_articulation_indices = torch.tensor(policy_to_art_indices, device=env.device)
+
+        # Get the scale from the configuration; the offset is the default joint position
+        self._policy_output_scale = torch.tensor(cfg.policy_output_scale, device=env.device)
+        self._policy_output_offset = wp.to_torch(self._asset.data.default_joint_pos)[:, self._joint_ids].clone()
+
+        # Create tensors to store raw and processed actions
+        self._raw_actions = torch.zeros(self.num_envs, len(self._joint_ids), device=self.device)
+        self._processed_actions = torch.zeros(self.num_envs, len(self._joint_ids), device=self.device)
+
+    """
+    Properties.
+    """
+
+    @property
+    def action_dim(self) -> int:
+        """Lower Body Action: [vx, vy, wz, hip_height]"""
+        return 4
+
+    @property
+    def raw_actions(self) -> torch.Tensor:
+        return self._raw_actions
+
+    @property
+    def processed_actions(self) -> torch.Tensor:
+        return self._processed_actions
+
+    def _compose_policy_input(self, base_command: torch.Tensor, obs_tensor: torch.Tensor) -> torch.Tensor:
+        """Compose the policy input by concatenating repeated commands with observations.
+
+        Args:
+            base_command: The base command tensor [vx, vy, wz, hip_height].
+            obs_tensor: The observation tensor from the environment.
+
+        Returns:
+            The composed policy input tensor with repeated commands concatenated to observations.
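+
+        Shape example (illustrative, assuming a configured history length of 3):
+        with ``base_command`` of shape ``(N, 4)``, the repeated command block has
+        shape ``(N, 4 * 3)`` and the result ``(N, 4 * 3 + obs_tensor.shape[-1])``:
+
+        .. code-block:: python
+
+            out = self._compose_policy_input(torch.zeros(2, 4), torch.zeros(2, 30))
+            assert out.shape == (2, 12 + 30)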
+ """ + # Get history length from observation configuration + history_length = getattr(self._observation_cfg, self._obs_group_name).history_length + # Default to 1 if history_length is None (no history, just current observation) + if history_length is None: + history_length = 1 + + # Repeat commands based on history length and concatenate with observations + repeated_commands = base_command.unsqueeze(1).repeat(1, history_length, 1).reshape(base_command.shape[0], -1) + policy_input = torch.cat([repeated_commands, obs_tensor], dim=-1) + + return policy_input + + def process_actions(self, actions: torch.Tensor): + """Process the input actions using the locomotion policy. + + Args: + actions: The lower body commands. + """ + + # Extract base command from the action tensor + # Assuming the base command [vx, vy, wz, hip_height] + base_command = actions + + obs_tensor = self._env.obs_buf["lower_body_policy"] + + # Compose policy input using helper function + policy_input = self._compose_policy_input(base_command, obs_tensor) + + joint_actions = self._policy.forward(policy_input) + + # Remap policy output from policy order to articulation order if mapping is defined + self._raw_actions.zero_() + if self._policy_to_articulation_indices is not None: + self._raw_actions[:, self._policy_to_articulation_indices] = joint_actions + else: + self._raw_actions[:] = joint_actions + + # Apply scaling and offset to the raw actions from the policy + self._processed_actions = self._raw_actions * self._policy_output_scale + self._policy_output_offset + + # Clip actions if configured + if self.cfg.clip is not None: + self._processed_actions = torch.clamp( + self._processed_actions, min=self._clip[:, :, 0], max=self._clip[:, :, 1] + ) + + def apply_actions(self): + """Apply the actions to the environment.""" + # Store the raw actions + self._asset.set_joint_position_target(self._processed_actions, joint_ids=self._joint_ids) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py new file mode 100644 index 00000000000..41369b2b2b2 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/locomanipulation/pick_place/mdp/observations.py @@ -0,0 +1,62 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch + +import warp as wp + +from isaaclab.envs import ManagerBasedRLEnv +from isaaclab.envs.utils.io_descriptors import generic_io_descriptor, record_shape +from isaaclab.managers import SceneEntityCfg + + +def upper_body_last_action( + env: ManagerBasedRLEnv, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Extract the last action of the upper body.""" + asset = env.scene[asset_cfg.name] + joint_pos_target = wp.to_torch(asset.data.joint_pos_target) + + # Use joint_names from asset_cfg to find indices + joint_names = asset_cfg.joint_names if hasattr(asset_cfg, "joint_names") else None + if joint_names is None: + raise ValueError("asset_cfg must have 'joint_names' attribute for upper_body_last_action.") + + # Find joint indices matching the provided joint_names (supports regex) + # find_joints returns (joint_mask: wp.array, joint_names: list[str], joint_indices: list[int]) + _, _, joint_indices = asset.find_joints(joint_names) + + # Get upper body joint positions for all environments + upper_body_joint_pos_target = joint_pos_target[:, joint_indices] + + return upper_body_joint_pos_target + + +@generic_io_descriptor(dtype=torch.float32, observation_type="Action", on_inspect=[record_shape]) +def last_action_with_remap( + env: ManagerBasedRLEnv, + action_name: str | None = None, + asset_cfg: SceneEntityCfg = SceneEntityCfg("robot"), +) -> torch.Tensor: + """Get the last raw action from an action term with reordering based on joint_ids. + + Note: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their actions returned. + + Args: + env: The manager-based RL environment. + action_name: The name of the action term to get raw actions from. + asset_cfg: The SceneEntity associated with this observation. The joint_ids are used to index/reorder. + + Returns: + The raw actions tensor reordered by joint_ids. + Shape: (num_envs, len(joint_ids)) + """ + if action_name is None: + return env.action_manager.action[:, asset_cfg.joint_ids] + else: + return env.action_manager.get_term(action_name).raw_actions[:, asset_cfg.joint_ids] diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py new file mode 100644 index 00000000000..6df830c98c0 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/__init__.py @@ -0,0 +1,58 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +import gymnasium as gym + +from . 
import agents + +gym.register( + id="Isaac-PickPlace-GR1T2-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.pickplace_gr1t2_env_cfg:PickPlaceGR1T2EnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + +# gym.register( +# id="Isaac-NutPour-GR1T2-Pink-IK-Abs-v0", +# entry_point="isaaclab.envs:ManagerBasedRLEnv", +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.nutpour_gr1t2_pink_ik_env_cfg:NutPourGR1T2PinkIKEnvCfg", +# "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_image_nut_pouring.json", +# }, +# disable_env_checker=True, +# ) + +# gym.register( +# id="Isaac-ExhaustPipe-GR1T2-Pink-IK-Abs-v0", +# entry_point="isaaclab.envs:ManagerBasedRLEnv", +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.exhaustpipe_gr1t2_pink_ik_env_cfg:ExhaustPipeGR1T2PinkIKEnvCfg", +# "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_image_exhaust_pipe.json", +# }, +# disable_env_checker=True, +# ) + +gym.register( + id="Isaac-PickPlace-GR1T2-WaistEnabled-Abs-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + kwargs={ + "env_cfg_entry_point": f"{__name__}.pickplace_gr1t2_waist_enabled_env_cfg:PickPlaceGR1T2WaistEnabledEnvCfg", + "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", + }, + disable_env_checker=True, +) + +# gym.register( +# id="Isaac-PickPlace-G1-InspireFTP-Abs-v0", +# entry_point="isaaclab.envs:ManagerBasedRLEnv", +# kwargs={ +# "env_cfg_entry_point": f"{__name__}.pickplace_unitree_g1_inspire_hand_env_cfg:PickPlaceG1InspireFTPEnvCfg", +# "robomimic_bc_cfg_entry_point": f"{agents.__name__}:robomimic/bc_rnn_low_dim.json", +# }, +# disable_env_checker=True, +# ) diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json new file mode 100644 index 00000000000..d2e0f8fc6d9 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/agents/robomimic/bc_rnn_low_dim.json @@ -0,0 +1,117 @@ +{ + "algo_name": "bc", + "experiment": { + "name": "bc_rnn_low_dim_gr1t2", + "validate": false, + "logging": { + "terminal_output_to_txt": true, + "log_tb": true + }, + "save": { + "enabled": true, + "every_n_seconds": null, + "every_n_epochs": 100, + "epochs": [], + "on_best_validation": false, + "on_best_rollout_return": false, + "on_best_rollout_success_rate": true + }, + "epoch_every_n_steps": 100, + "env": null, + "additional_envs": null, + "render": false, + "render_video": false, + "rollout": { + "enabled": false + } + }, + "train": { + "data": null, + "num_data_workers": 4, + "hdf5_cache_mode": "all", + "hdf5_use_swmr": true, + "hdf5_normalize_obs": false, + "hdf5_filter_key": null, + "hdf5_validation_filter_key": null, + "seq_length": 10, + "dataset_keys": [ + "actions" + ], + "goal_mode": null, + "cuda": true, + "batch_size": 100, + "num_epochs": 2000, + "seed": 101 + }, + "algo": { + "optim_params": { + "policy": { + "optimizer_type": "adam", + "learning_rate": { + "initial": 0.001, + "decay_factor": 0.1, + "epoch_schedule": [], + "scheduler_type": "multistep" + }, + "regularization": { + "L2": 0.0 + } + } + }, + "loss": { + "l2_weight": 1.0, + "l1_weight": 0.0, + "cos_weight": 0.0 + }, + "actor_layer_dims": [], + "gmm": { + "enabled": false, + "num_modes": 5, + "min_std": 0.0001, + 
"std_activation": "softplus", + "low_noise_eval": true + }, + "rnn": { + "enabled": true, + "horizon": 10, + "hidden_dim": 400, + "rnn_type": "LSTM", + "num_layers": 2, + "open_loop": false, + "kwargs": { + "bidirectional": false + } + }, + "transformer": { + "enabled": false, + "context_length": 10, + "embed_dim": 512, + "num_layers": 6, + "num_heads": 8, + "emb_dropout": 0.1, + "attn_dropout": 0.1, + "block_output_dropout": 0.1, + "sinusoidal_embedding": false, + "activation": "gelu", + "supervise_all_steps": false, + "nn_parameter_for_timesteps": true + } + }, + "observation": { + "modalities": { + "obs": { + "low_dim": [ + "left_eef_pos", + "left_eef_quat", + "right_eef_pos", + "right_eef_quat", + "hand_joint_state", + "object" + ], + "rgb": [], + "depth": [], + "scan": [] + } + } + } +} diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py new file mode 100644 index 00000000000..555bfb7cbe8 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This sub-module contains the functions that are specific to the lift environments.""" + +from isaaclab.envs.mdp import * # noqa: F401, F403 + +from .observations import * # noqa: F401, F403 +from .pick_place_events import * # noqa: F401, F403 +from .terminations import * # noqa: F401, F403 diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py new file mode 100644 index 00000000000..d60a995e299 --- /dev/null +++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/observations.py @@ -0,0 +1,87 @@ +# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md). +# All rights reserved. 
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import torch
+from typing import TYPE_CHECKING
+
+import warp as wp
+
+if TYPE_CHECKING:
+    from isaaclab.envs import ManagerBasedRLEnv
+
+
+def object_obs(
+    env: ManagerBasedRLEnv,
+    left_eef_link_name: str,
+    right_eef_link_name: str,
+) -> torch.Tensor:
+    """
+    Object observations (relative to the environment origin):
+        object pos,
+        object quat,
+        left_eef to object,
+        right_eef to object,
+    """
+
+    body_pos_w = wp.to_torch(env.scene["robot"].data.body_pos_w)
+    left_eef_idx = env.scene["robot"].body_names.index(left_eef_link_name)
+    right_eef_idx = env.scene["robot"].body_names.index(right_eef_link_name)
+    left_eef_pos = body_pos_w[:, left_eef_idx] - env.scene.env_origins
+    right_eef_pos = body_pos_w[:, right_eef_idx] - env.scene.env_origins
+
+    object_pos = wp.to_torch(env.scene["object"].data.root_pos_w) - env.scene.env_origins
+    object_quat = wp.to_torch(env.scene["object"].data.root_quat_w)
+
+    left_eef_to_object = object_pos - left_eef_pos
+    right_eef_to_object = object_pos - right_eef_pos
+
+    return torch.cat(
+        (
+            object_pos,
+            object_quat,
+            left_eef_to_object,
+            right_eef_to_object,
+        ),
+        dim=1,
+    )
+
+
+def get_eef_pos(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor:
+    """Position of the given robot link relative to the environment origin."""
+    body_pos_w = wp.to_torch(env.scene["robot"].data.body_pos_w)
+    eef_idx = env.scene["robot"].body_names.index(link_name)
+    eef_pos = body_pos_w[:, eef_idx] - env.scene.env_origins
+
+    return eef_pos
+
+
+def get_eef_quat(env: ManagerBasedRLEnv, link_name: str) -> torch.Tensor:
+    """Orientation (world-frame quaternion) of the given robot link."""
+    body_quat_w = wp.to_torch(env.scene["robot"].data.body_quat_w)
+    eef_idx = env.scene["robot"].body_names.index(link_name)
+    eef_quat = body_quat_w[:, eef_idx]
+
+    return eef_quat
+
+
+def get_robot_joint_state(
+    env: ManagerBasedRLEnv,
+    joint_names: list[str],
+) -> torch.Tensor:
+    """Joint positions of the joints matching the given regex patterns."""
+    # joint_names is a list of regex patterns, so resolve them with find_joints
+    # find_joints returns (joint_mask: wp.array, joint_names: list[str], joint_indices: list[int])
+    _, _, joint_indices = env.scene["robot"].find_joints(joint_names)
+    robot_joint_states = wp.to_torch(env.scene["robot"].data.joint_pos)[:, joint_indices]
+
+    return robot_joint_states
+
+
+def get_all_robot_link_state(
+    env: ManagerBasedRLEnv,
+) -> torch.Tensor:
+    """Full link states (pose and velocity) of all robot links in the world frame."""
+    return wp.to_torch(env.scene["robot"].data.body_link_state_w)
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py
new file mode 100644
index 00000000000..8ffa7bf4df8
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/mdp/pick_place_events.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2025-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from typing import TYPE_CHECKING + +import isaaclab.utils.math as math_utils +from isaaclab.managers import SceneEntityCfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +def reset_object_poses_nut_pour( + env: ManagerBasedEnv, + env_ids: torch.Tensor, + pose_range: dict[str, tuple[float, float]], + sorting_beaker_cfg: SceneEntityCfg = SceneEntityCfg("sorting_beaker"), + factory_nut_cfg: SceneEntityCfg = SceneEntityCfg("factory_nut"), + sorting_bowl_cfg: SceneEntityCfg = SceneEntityCfg("sorting_bowl"), + sorting_scale_cfg: SceneEntityCfg = SceneEntityCfg("sorting_scale"), +): + """Reset the asset root states to a random position and orientation uniformly within the given ranges. + + Args: + env: The RL environment instance. + env_ids: The environment IDs to reset the object poses for. + sorting_beaker_cfg: The configuration for the sorting beaker asset. + factory_nut_cfg: The configuration for the factory nut asset. + sorting_bowl_cfg: The configuration for the sorting bowl asset. + sorting_scale_cfg: The configuration for the sorting scale asset. + pose_range: The dictionary of pose ranges for the objects. Keys are + ``x``, ``y``, ``z``, ``roll``, ``pitch``, and ``yaw``. + """ + # extract the used quantities (to enable type-hinting) + sorting_beaker = env.scene[sorting_beaker_cfg.name] + factory_nut = env.scene[factory_nut_cfg.name] + sorting_bowl = env.scene[sorting_bowl_cfg.name] + sorting_scale = env.scene[sorting_scale_cfg.name] + + # get default root state + sorting_beaker_root_states = sorting_beaker.data.default_root_state[env_ids].clone() + factory_nut_root_states = factory_nut.data.default_root_state[env_ids].clone() + sorting_bowl_root_states = sorting_bowl.data.default_root_state[env_ids].clone() + sorting_scale_root_states = sorting_scale.data.default_root_state[env_ids].clone() + + # get pose ranges + range_list = [pose_range.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] + ranges = torch.tensor(range_list, device=sorting_beaker.device) + + # randomize sorting beaker and factory nut together + rand_samples = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device + ) + orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) + positions_sorting_beaker = ( + sorting_beaker_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + ) + positions_factory_nut = factory_nut_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + orientations_sorting_beaker = math_utils.quat_mul(sorting_beaker_root_states[:, 3:7], orientations_delta) + orientations_factory_nut = math_utils.quat_mul(factory_nut_root_states[:, 3:7], orientations_delta) + + # randomize sorting bowl + rand_samples = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device + ) + orientations_delta = math_utils.quat_from_euler_xyz(rand_samples[:, 3], rand_samples[:, 4], rand_samples[:, 5]) + positions_sorting_bowl = sorting_bowl_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3] + orientations_sorting_bowl = math_utils.quat_mul(sorting_bowl_root_states[:, 3:7], orientations_delta) + + # randomize scorting scale + rand_samples = math_utils.sample_uniform( + ranges[:, 0], ranges[:, 1], (len(env_ids), 6), device=sorting_beaker.device + ) + orientations_delta = 
+    positions_sorting_scale = sorting_scale_root_states[:, 0:3] + env.scene.env_origins[env_ids] + rand_samples[:, 0:3]
+    orientations_sorting_scale = math_utils.quat_mul(sorting_scale_root_states[:, 3:7], orientations_delta)
+
+    # set into the physics simulation
+    sorting_beaker.write_root_pose_to_sim(
+        torch.cat([positions_sorting_beaker, orientations_sorting_beaker], dim=-1), env_ids=env_ids
+    )
+    factory_nut.write_root_pose_to_sim(
+        torch.cat([positions_factory_nut, orientations_factory_nut], dim=-1), env_ids=env_ids
+    )
+    sorting_bowl.write_root_pose_to_sim(
+        torch.cat([positions_sorting_bowl, orientations_sorting_bowl], dim=-1), env_ids=env_ids
+    )
+    sorting_scale.write_root_pose_to_sim(
+        torch.cat([positions_sorting_scale, orientations_sorting_scale], dim=-1), env_ids=env_ids
+    )
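Editorial note: the beaker and nut intentionally share one pose sample (their relative placement survives the reset), while the bowl and scale draw independent samples. A hedged sketch of registering this reset as an event term (ranges are illustrative; `mdp` stands for this task's mdp package):

```python
# Sketch only; pose_range values are illustrative, not taken from the diff.
from isaaclab.managers import EventTermCfg as EventTerm

reset_objects = EventTerm(
    func=mdp.reset_object_poses_nut_pour,
    mode="reset",
    params={"pose_range": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "yaw": (-0.5, 0.5)}},
)
```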
+ """ + if task_link_name == "": + raise ValueError("task_link_name must be provided to task_done_pick_place") + + # Get object entity from the scene + object: RigidObject = env.scene[object_cfg.name] + + # Extract wheel position relative to environment origin + object_root_pos_w = wp.to_torch(object.data.root_pos_w) + object_x = object_root_pos_w[:, 0] - env.scene.env_origins[:, 0] + object_y = object_root_pos_w[:, 1] - env.scene.env_origins[:, 1] + object_height = object_root_pos_w[:, 2] - env.scene.env_origins[:, 2] + object_vel = torch.abs(wp.to_torch(object.data.root_vel_w)) + + # Get right wrist position relative to environment origin + robot_body_pos_w = wp.to_torch(env.scene["robot"].data.body_pos_w) + right_eef_idx = env.scene["robot"].body_names.index(task_link_name) + right_wrist_x = robot_body_pos_w[:, right_eef_idx, 0] - env.scene.env_origins[:, 0] + + # Check all success conditions and combine with logical AND + done = object_x < max_x + done = torch.logical_and(done, object_x > min_x) + done = torch.logical_and(done, object_y < max_y) + done = torch.logical_and(done, object_y > min_y) + done = torch.logical_and(done, object_height < max_height) + done = torch.logical_and(done, right_wrist_x < right_wrist_max_x) + done = torch.logical_and(done, object_vel[:, 0] < min_vel) + done = torch.logical_and(done, object_vel[:, 1] < min_vel) + done = torch.logical_and(done, object_vel[:, 2] < min_vel) + + return done + + +def task_done_nut_pour( + env: ManagerBasedRLEnv, + sorting_scale_cfg: SceneEntityCfg = SceneEntityCfg("sorting_scale"), + sorting_bowl_cfg: SceneEntityCfg = SceneEntityCfg("sorting_bowl"), + sorting_beaker_cfg: SceneEntityCfg = SceneEntityCfg("sorting_beaker"), + factory_nut_cfg: SceneEntityCfg = SceneEntityCfg("factory_nut"), + sorting_bin_cfg: SceneEntityCfg = SceneEntityCfg("black_sorting_bin"), + max_bowl_to_scale_x: float = 0.055, + max_bowl_to_scale_y: float = 0.055, + max_bowl_to_scale_z: float = 0.025, + max_nut_to_bowl_x: float = 0.050, + max_nut_to_bowl_y: float = 0.050, + max_nut_to_bowl_z: float = 0.019, + max_beaker_to_bin_x: float = 0.08, + max_beaker_to_bin_y: float = 0.12, + max_beaker_to_bin_z: float = 0.07, +) -> torch.Tensor: + """Determine if the nut pouring task is complete. + + This function checks whether all success conditions for the task have been met: + 1. The factory nut is in the sorting bowl + 2. The sorting beaker is in the sorting bin + 3. The sorting bowl is placed on the sorting scale + + Args: + env: The RL environment instance. + sorting_scale_cfg: Configuration for the sorting scale entity. + sorting_bowl_cfg: Configuration for the sorting bowl entity. + sorting_beaker_cfg: Configuration for the sorting beaker entity. + factory_nut_cfg: Configuration for the factory nut entity. + sorting_bin_cfg: Configuration for the sorting bin entity. + max_bowl_to_scale_x: Maximum x position of the sorting bowl relative to the sorting scale for task completion. + max_bowl_to_scale_y: Maximum y position of the sorting bowl relative to the sorting scale for task completion. + max_bowl_to_scale_z: Maximum z position of the sorting bowl relative to the sorting scale for task completion. + max_nut_to_bowl_x: Maximum x position of the factory nut relative to the sorting bowl for task completion. + max_nut_to_bowl_y: Maximum y position of the factory nut relative to the sorting bowl for task completion. + max_nut_to_bowl_z: Maximum z position of the factory nut relative to the sorting bowl for task completion. 
+
+
+def task_done_nut_pour(
+    env: ManagerBasedRLEnv,
+    sorting_scale_cfg: SceneEntityCfg = SceneEntityCfg("sorting_scale"),
+    sorting_bowl_cfg: SceneEntityCfg = SceneEntityCfg("sorting_bowl"),
+    sorting_beaker_cfg: SceneEntityCfg = SceneEntityCfg("sorting_beaker"),
+    factory_nut_cfg: SceneEntityCfg = SceneEntityCfg("factory_nut"),
+    sorting_bin_cfg: SceneEntityCfg = SceneEntityCfg("black_sorting_bin"),
+    max_bowl_to_scale_x: float = 0.055,
+    max_bowl_to_scale_y: float = 0.055,
+    max_bowl_to_scale_z: float = 0.025,
+    max_nut_to_bowl_x: float = 0.050,
+    max_nut_to_bowl_y: float = 0.050,
+    max_nut_to_bowl_z: float = 0.019,
+    max_beaker_to_bin_x: float = 0.08,
+    max_beaker_to_bin_y: float = 0.12,
+    max_beaker_to_bin_z: float = 0.07,
+) -> torch.Tensor:
+    """Determine if the nut pouring task is complete.
+
+    This function checks whether all success conditions for the task have been met:
+    1. The factory nut is in the sorting bowl
+    2. The sorting beaker is in the sorting bin
+    3. The sorting bowl is placed on the sorting scale
+
+    Args:
+        env: The RL environment instance.
+        sorting_scale_cfg: Configuration for the sorting scale entity.
+        sorting_bowl_cfg: Configuration for the sorting bowl entity.
+        sorting_beaker_cfg: Configuration for the sorting beaker entity.
+        factory_nut_cfg: Configuration for the factory nut entity.
+        sorting_bin_cfg: Configuration for the sorting bin entity.
+        max_bowl_to_scale_x: Maximum x position of the sorting bowl relative to the sorting scale for task completion.
+        max_bowl_to_scale_y: Maximum y position of the sorting bowl relative to the sorting scale for task completion.
+        max_bowl_to_scale_z: Maximum z position of the sorting bowl relative to the sorting scale for task completion.
+        max_nut_to_bowl_x: Maximum x position of the factory nut relative to the sorting bowl for task completion.
+        max_nut_to_bowl_y: Maximum y position of the factory nut relative to the sorting bowl for task completion.
+        max_nut_to_bowl_z: Maximum z position of the factory nut relative to the sorting bowl for task completion.
+        max_beaker_to_bin_x: Maximum x position of the sorting beaker relative to the sorting bin for task completion.
+        max_beaker_to_bin_y: Maximum y position of the sorting beaker relative to the sorting bin for task completion.
+        max_beaker_to_bin_z: Maximum z position of the sorting beaker relative to the sorting bin for task completion.
+
+    Returns:
+        Boolean tensor indicating which environments have completed the task.
+    """
+    # Get object entities from the scene
+    sorting_scale: RigidObject = env.scene[sorting_scale_cfg.name]
+    sorting_bowl: RigidObject = env.scene[sorting_bowl_cfg.name]
+    factory_nut: RigidObject = env.scene[factory_nut_cfg.name]
+    sorting_beaker: RigidObject = env.scene[sorting_beaker_cfg.name]
+    sorting_bin: RigidObject = env.scene[sorting_bin_cfg.name]
+
+    # Get positions relative to environment origin
+    scale_pos = wp.to_torch(sorting_scale.data.root_pos_w) - env.scene.env_origins
+    bowl_pos = wp.to_torch(sorting_bowl.data.root_pos_w) - env.scene.env_origins
+    sorting_beaker_pos = wp.to_torch(sorting_beaker.data.root_pos_w) - env.scene.env_origins
+    nut_pos = wp.to_torch(factory_nut.data.root_pos_w) - env.scene.env_origins
+    bin_pos = wp.to_torch(sorting_bin.data.root_pos_w) - env.scene.env_origins
+
+    # nut to bowl
+    nut_to_bowl_x = torch.abs(nut_pos[:, 0] - bowl_pos[:, 0])
+    nut_to_bowl_y = torch.abs(nut_pos[:, 1] - bowl_pos[:, 1])
+    nut_to_bowl_z = nut_pos[:, 2] - bowl_pos[:, 2]
+
+    # bowl to scale
+    bowl_to_scale_x = torch.abs(bowl_pos[:, 0] - scale_pos[:, 0])
+    bowl_to_scale_y = torch.abs(bowl_pos[:, 1] - scale_pos[:, 1])
+    bowl_to_scale_z = bowl_pos[:, 2] - scale_pos[:, 2]
+
+    # beaker to bin
+    beaker_to_bin_x = torch.abs(sorting_beaker_pos[:, 0] - bin_pos[:, 0])
+    beaker_to_bin_y = torch.abs(sorting_beaker_pos[:, 1] - bin_pos[:, 1])
+    beaker_to_bin_z = sorting_beaker_pos[:, 2] - bin_pos[:, 2]
+
+    done = nut_to_bowl_x < max_nut_to_bowl_x
+    done = torch.logical_and(done, nut_to_bowl_y < max_nut_to_bowl_y)
+    done = torch.logical_and(done, nut_to_bowl_z < max_nut_to_bowl_z)
+    done = torch.logical_and(done, bowl_to_scale_x < max_bowl_to_scale_x)
+    done = torch.logical_and(done, bowl_to_scale_y < max_bowl_to_scale_y)
+    done = torch.logical_and(done, bowl_to_scale_z < max_bowl_to_scale_z)
+    done = torch.logical_and(done, beaker_to_bin_x < max_beaker_to_bin_x)
+    done = torch.logical_and(done, beaker_to_bin_y < max_beaker_to_bin_y)
+    done = torch.logical_and(done, beaker_to_bin_z < max_beaker_to_bin_z)
+
+    return done
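Editorial note: all three groups of checks above follow one containment pattern: absolute-value thresholds on x/y define a box footprint around the reference object, while z stays signed so that resting in or on the container passes and hovering above it fails. A hypothetical helper (not part of the diff) that captures the pattern:

```python
import torch


def within_box(rel_pos: torch.Tensor, half_x: float, half_y: float, max_z: float) -> torch.Tensor:
    """Per-env box test: |x| and |y| inside the footprint, signed z below the cap."""
    return (rel_pos[:, 0].abs() < half_x) & (rel_pos[:, 1].abs() < half_y) & (rel_pos[:, 2] < max_z)
```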
+
+
+def task_done_exhaust_pipe(
+    env: ManagerBasedRLEnv,
+    blue_exhaust_pipe_cfg: SceneEntityCfg = SceneEntityCfg("blue_exhaust_pipe"),
+    blue_sorting_bin_cfg: SceneEntityCfg = SceneEntityCfg("blue_sorting_bin"),
+    max_blue_exhaust_to_bin_x: float = 0.085,
+    max_blue_exhaust_to_bin_y: float = 0.200,
+    min_blue_exhaust_to_bin_y: float = -0.090,
+    max_blue_exhaust_to_bin_z: float = 0.070,
+) -> torch.Tensor:
+    """Determine if the exhaust pipe task is complete.
+
+    This function checks whether all success conditions for the task have been met:
+    1. The blue exhaust pipe is placed in the correct position
+
+    Args:
+        env: The RL environment instance.
+        blue_exhaust_pipe_cfg: Configuration for the blue exhaust pipe entity.
+        blue_sorting_bin_cfg: Configuration for the blue sorting bin entity.
+        max_blue_exhaust_to_bin_x: Maximum x position of the blue exhaust pipe relative to the blue sorting bin for task completion.
+        max_blue_exhaust_to_bin_y: Maximum y position of the blue exhaust pipe relative to the blue sorting bin for task completion.
+        min_blue_exhaust_to_bin_y: Minimum y position of the blue exhaust pipe relative to the blue sorting bin for task completion.
+        max_blue_exhaust_to_bin_z: Maximum z position of the blue exhaust pipe relative to the blue sorting bin for task completion.
+
+    Returns:
+        Boolean tensor indicating which environments have completed the task.
+    """
+    # Get object entities from the scene
+    blue_exhaust_pipe: RigidObject = env.scene[blue_exhaust_pipe_cfg.name]
+    blue_sorting_bin: RigidObject = env.scene[blue_sorting_bin_cfg.name]
+
+    # Get positions relative to environment origin
+    blue_exhaust_pipe_pos = wp.to_torch(blue_exhaust_pipe.data.root_pos_w) - env.scene.env_origins
+    blue_sorting_bin_pos = wp.to_torch(blue_sorting_bin.data.root_pos_w) - env.scene.env_origins
+
+    # blue exhaust to bin
+    blue_exhaust_to_bin_x = torch.abs(blue_exhaust_pipe_pos[:, 0] - blue_sorting_bin_pos[:, 0])
+    blue_exhaust_to_bin_y = blue_exhaust_pipe_pos[:, 1] - blue_sorting_bin_pos[:, 1]
+    blue_exhaust_to_bin_z = blue_exhaust_pipe_pos[:, 2] - blue_sorting_bin_pos[:, 2]
+
+    done = blue_exhaust_to_bin_x < max_blue_exhaust_to_bin_x
+    done = torch.logical_and(done, blue_exhaust_to_bin_y < max_blue_exhaust_to_bin_y)
+    done = torch.logical_and(done, blue_exhaust_to_bin_y > min_blue_exhaust_to_bin_y)
+    done = torch.logical_and(done, blue_exhaust_to_bin_z < max_blue_exhaust_to_bin_z)
+
+    return done
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py
new file mode 100644
index 00000000000..2128004cd4a
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_env_cfg.py
@@ -0,0 +1,443 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import tempfile
+
+from pink.tasks import DampingTask, FrameTask
+
+# import isaaclab.controllers.utils as ControllerUtils
+import isaaclab.envs.mdp as base_mdp
+import isaaclab.sim as sim_utils
+from isaaclab.assets import ArticulationCfg, AssetBaseCfg
+from isaaclab.controllers.pink_ik import NullSpacePostureTask, PinkIKControllerCfg
+
+# TODO: comment back in when XR is supported again
+# from isaaclab.devices.device_base import DevicesCfg
+# from isaaclab.devices.openxr import ManusViveCfg, OpenXRDeviceCfg, XrCfg
+# from isaaclab.devices.openxr.retargeters.humanoid.fourier.gr1t2_retargeter import GR1T2RetargeterCfg
+from isaaclab.envs import ManagerBasedRLEnvCfg
+from isaaclab.envs.mdp.actions.pink_actions_cfg import PinkInverseKinematicsActionCfg
+from isaaclab.managers import EventTermCfg as EventTerm
+from isaaclab.managers import ObservationGroupCfg as ObsGroup
+from isaaclab.managers import ObservationTermCfg as ObsTerm
+from isaaclab.managers import SceneEntityCfg
+from isaaclab.managers import TerminationTermCfg as DoneTerm
+from isaaclab.scene import InteractiveSceneCfg
+from isaaclab.sim import SimulationCfg
+from isaaclab.sim._impl.newton_manager_cfg import NewtonCfg
+from isaaclab.sim._impl.solvers_cfg import MJWarpSolverCfg
+from isaaclab.sim.spawners.from_files.from_files_cfg import GroundPlaneCfg
+from isaaclab.utils import configclass
+
+from . import mdp
+
+from isaaclab_assets.robots.fourier import GR1T2_HIGH_PD_CFG  # isort: skip
+
+
+##
+# Scene definition
+##
+@configclass
+class ObjectTableSceneCfg(InteractiveSceneCfg):
+
+    # Table
+    # TODO: comment back in when packing table asset can be loaded
+    # packing_table = AssetBaseCfg(
+    #     prim_path="/World/envs/env_.*/PackingTable",
+    #     init_state=AssetBaseCfg.InitialStateCfg(pos=[0.0, 0.55, 0.0], rot=[1.0, 0.0, 0.0, 0.0]),
+    #     spawn=UsdFileCfg(
+    #         usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/PackingTable/packing_table.usd",
+    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
+    #     ),
+    # )
+
+    # TODO: comment back in when steering wheel asset can be loaded
+    # object = RigidObjectCfg(
+    #     prim_path="{ENV_REGEX_NS}/Object",
+    #     init_state=RigidObjectCfg.InitialStateCfg(pos=[-0.45, 0.45, 0.9996], rot=[1, 0, 0, 0]),
+    #     spawn=UsdFileCfg(
+    #         usd_path=f"{ISAACLAB_NUCLEUS_DIR}/Mimic/pick_place_task/pick_place_assets/steering_wheel.usd",
+    #         scale=(0.75, 0.75, 0.75),
+    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(),
+    #     ),
+    # )
+
+    # Humanoid robot configured for pick-place manipulation tasks
+    robot: ArticulationCfg = GR1T2_HIGH_PD_CFG.replace(
+        # prim_path="/World/envs/env_.*/Robot",
+        prim_path="{ENV_REGEX_NS}/Robot",
+        init_state=ArticulationCfg.InitialStateCfg(
+            pos=(0, 0, 0.93),
+            # rot=(1.0, 0.0, 0.0, 0.0),  # identity quaternion in (w, x, y, z), i.e. no rotation
+            rot=(0, 0, 0.7071, 0.7071),
+            joint_pos={
+                # right-arm
+                "right_shoulder_pitch_joint": 0.0,
+                "right_shoulder_roll_joint": 0.0,
+                "right_shoulder_yaw_joint": 0.0,
+                "right_elbow_pitch_joint": -1.5708,
+                "right_wrist_yaw_joint": 0.0,
+                "right_wrist_roll_joint": 0.0,
+                "right_wrist_pitch_joint": 0.0,
+                # left-arm
+                "left_shoulder_pitch_joint": 0.0,
+                "left_shoulder_roll_joint": 0.0,
+                "left_shoulder_yaw_joint": 0.0,
+                "left_elbow_pitch_joint": -1.5708,
+                "left_wrist_yaw_joint": 0.0,
+                "left_wrist_roll_joint": 0.0,
+                "left_wrist_pitch_joint": 0.0,
+                # --
+                "head_.*": 0.0,
+                "waist_.*": 0.0,
+                ".*_hip_.*": 0.0,
+                ".*_knee_.*": 0.0,
+                ".*_ankle_.*": 0.0,
+                "R_.*": 0.0,
+                "L_.*": 0.0,
+            },
+            joint_vel={".*": 0.0},
+        ),
+    )
+
+    # Ground plane
+    ground = AssetBaseCfg(
+        prim_path="/World/GroundPlane",
+        spawn=GroundPlaneCfg(),
+    )
+
+    # Lights
+    light = AssetBaseCfg(
+        prim_path="/World/light",
+        spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0),
+    )
+
+
+##
+# MDP settings
+##
+@configclass
+class ActionsCfg:
+    """Action specifications for the MDP."""
+
+    upper_body_ik = PinkInverseKinematicsActionCfg(
+        pink_controlled_joint_names=[
+            "left_shoulder_pitch_joint",
+            "left_shoulder_roll_joint",
+            "left_shoulder_yaw_joint",
+            "left_elbow_pitch_joint",
+            "left_wrist_yaw_joint",
+            "left_wrist_roll_joint",
+            "left_wrist_pitch_joint",
+            "right_shoulder_pitch_joint",
+            "right_shoulder_roll_joint",
+            "right_shoulder_yaw_joint",
+            "right_elbow_pitch_joint",
+            "right_wrist_yaw_joint",
+            "right_wrist_roll_joint",
+            "right_wrist_pitch_joint",
+        ],
+        hand_joint_names=[
+            "L_index_proximal_joint",
+            "L_middle_proximal_joint",
+            "L_pinky_proximal_joint",
+            "L_ring_proximal_joint",
+            "L_thumb_proximal_yaw_joint",
+            "R_index_proximal_joint",
+            "R_middle_proximal_joint",
+            "R_pinky_proximal_joint",
+            "R_ring_proximal_joint",
+            "R_thumb_proximal_yaw_joint",
+            "L_index_intermediate_joint",
+            "L_middle_intermediate_joint",
+            "L_pinky_intermediate_joint",
+            "L_ring_intermediate_joint",
+            "L_thumb_proximal_pitch_joint",
+            "R_index_intermediate_joint",
+            "R_middle_intermediate_joint",
+            "R_pinky_intermediate_joint",
"R_ring_intermediate_joint", + "R_thumb_proximal_pitch_joint", + "L_thumb_distal_joint", + "R_thumb_distal_joint", + ], + target_eef_link_names={ + "left_wrist": "left_hand_pitch_link", + "right_wrist": "right_hand_pitch_link", + }, + # the robot in the sim scene we are controlling + asset_name="robot", + # Configuration for the IK controller + # The frames names are the ones present in the URDF file + # The urdf has to be generated from the USD that is being used in the scene + controller=PinkIKControllerCfg( + articulation_name="robot", + base_link_name="base_link", + num_hand_joints=22, + show_ik_warnings=False, + fail_on_joint_limit_violation=False, # Determines whether to pink solver will fail due to a joint limit violation + variable_input_tasks=[ + FrameTask( + "GR1T2_fourier_hand_6dof_left_hand_pitch_link", + position_cost=8.0, # [cost] / [m] + orientation_cost=1.0, # [cost] / [rad] + lm_damping=12, # dampening for solver for step jumps + gain=0.5, + ), + FrameTask( + "GR1T2_fourier_hand_6dof_right_hand_pitch_link", + position_cost=8.0, # [cost] / [m] + orientation_cost=1.0, # [cost] / [rad] + lm_damping=12, # dampening for solver for step jumps + gain=0.5, + ), + DampingTask( + cost=0.5, # [cost] * [s] / [rad] + ), + NullSpacePostureTask( + cost=0.5, + lm_damping=1, + controlled_frames=[ + "GR1T2_fourier_hand_6dof_left_hand_pitch_link", + "GR1T2_fourier_hand_6dof_right_hand_pitch_link", + ], + controlled_joints=[ + "left_shoulder_pitch_joint", + "left_shoulder_roll_joint", + "left_shoulder_yaw_joint", + "left_elbow_pitch_joint", + "right_shoulder_pitch_joint", + "right_shoulder_roll_joint", + "right_shoulder_yaw_joint", + "right_elbow_pitch_joint", + "waist_yaw_joint", + "waist_pitch_joint", + "waist_roll_joint", + ], + ), + ], + fixed_input_tasks=[], + ), + ) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group with state values.""" + + actions = ObsTerm(func=mdp.last_action) + robot_joint_pos = ObsTerm( + func=base_mdp.joint_pos, + params={"asset_cfg": SceneEntityCfg("robot")}, + ) + robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")}) + robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")}) + # TODO: comment back in when streering wheel asset can be loaded + # object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")}) + # object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")}) + robot_links_state = ObsTerm(func=mdp.get_all_robot_link_state) + + left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"}) + left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"}) + right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"}) + right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"}) + + hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]}) + head_joint_state = ObsTerm( + func=mdp.get_robot_joint_state, + params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]}, + ) + + # TODO: comment back in when streering wheel asset can be loaded + # object = ObsTerm( + # func=mdp.object_obs, + # params={"left_eef_link_name": "left_hand_roll_link", "right_eef_link_name": "right_hand_roll_link"}, + # ) + + def 
+
+
+@configclass
+class ObservationsCfg:
+    """Observation specifications for the MDP."""
+
+    @configclass
+    class PolicyCfg(ObsGroup):
+        """Observations for policy group with state values."""
+
+        actions = ObsTerm(func=mdp.last_action)
+        robot_joint_pos = ObsTerm(
+            func=base_mdp.joint_pos,
+            params={"asset_cfg": SceneEntityCfg("robot")},
+        )
+        robot_root_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        robot_root_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("robot")})
+        # TODO: comment back in when steering wheel asset can be loaded
+        # object_pos = ObsTerm(func=base_mdp.root_pos_w, params={"asset_cfg": SceneEntityCfg("object")})
+        # object_rot = ObsTerm(func=base_mdp.root_quat_w, params={"asset_cfg": SceneEntityCfg("object")})
+        robot_links_state = ObsTerm(func=mdp.get_all_robot_link_state)
+
+        left_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "left_hand_roll_link"})
+        left_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "left_hand_roll_link"})
+        right_eef_pos = ObsTerm(func=mdp.get_eef_pos, params={"link_name": "right_hand_roll_link"})
+        right_eef_quat = ObsTerm(func=mdp.get_eef_quat, params={"link_name": "right_hand_roll_link"})
+
+        hand_joint_state = ObsTerm(func=mdp.get_robot_joint_state, params={"joint_names": ["R_.*", "L_.*"]})
+        head_joint_state = ObsTerm(
+            func=mdp.get_robot_joint_state,
+            params={"joint_names": ["head_pitch_joint", "head_roll_joint", "head_yaw_joint"]},
+        )
+
+        # TODO: comment back in when steering wheel asset can be loaded
+        # object = ObsTerm(
+        #     func=mdp.object_obs,
+        #     params={"left_eef_link_name": "left_hand_roll_link", "right_eef_link_name": "right_hand_roll_link"},
+        # )
+
+        def __post_init__(self):
+            self.enable_corruption = False
+            self.concatenate_terms = False
+
+    # observation groups
+    policy: PolicyCfg = PolicyCfg()
+
+
+@configclass
+class TerminationsCfg:
+    """Termination terms for the MDP."""
+
+    time_out = DoneTerm(func=mdp.time_out, time_out=True)
+
+    # TODO: comment back in when steering wheel asset can be loaded
+    # object_dropping = DoneTerm(
+    #     func=mdp.root_height_below_minimum, params={"minimum_height": 0.5, "asset_cfg": SceneEntityCfg("object")}
+    # )
+
+    # success = DoneTerm(func=mdp.task_done_pick_place, params={"task_link_name": "right_hand_roll_link"})
+
+
+@configclass
+class EventCfg:
+    """Configuration for events."""
+
+    reset_all = EventTerm(func=mdp.reset_scene_to_default, mode="reset")
+
+    # TODO: comment back in when steering wheel asset can be loaded
+    # reset_object = EventTerm(
+    #     func=mdp.reset_root_state_uniform,
+    #     mode="reset",
+    #     params={
+    #         "pose_range": {
+    #             "x": [-0.01, 0.01],
+    #             "y": [-0.01, 0.01],
+    #         },
+    #         "velocity_range": {},
+    #         "asset_cfg": SceneEntityCfg("object"),
+    #     },
+    # )
+
+
+@configclass
+class PickPlaceGR1T2EnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the GR1T2 environment."""
+
+    sim: SimulationCfg = SimulationCfg(
+        newton_cfg=NewtonCfg(
+            solver_cfg=MJWarpSolverCfg(
+                njmax=50,
+                nconmax=50,
+                cone="elliptic",
+                ls_parallel=True,
+                impratio=100,
+                iterations=100,
+                ls_iterations=50,
+                integrator="implicit",
+                solver="newton",
+            ),
+        )
+    )
+
+    # Scene settings
+    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True)
+    # Basic settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    # MDP settings
+    terminations: TerminationsCfg = TerminationsCfg()
+    events = EventCfg()
+
+    # Unused managers
+    commands = None
+    rewards = None
+    curriculum = None
+
+    # TODO: comment back in when XR is supported again
+    # # Position of the XR anchor in the world frame
+    # xr: XrCfg = XrCfg(
+    #     anchor_pos=(0.0, 0.0, 0.0),
+    #     anchor_rot=(1.0, 0.0, 0.0, 0.0),
+    # )
+
+    # # OpenXR hand tracking has 26 joints per hand
+    # NUM_OPENXR_HAND_JOINTS = 26
+
+    # Temporary directory for URDF files
+    temp_urdf_dir = tempfile.gettempdir()
+
+    # Idle action to hold robot in default pose
+    # Action format: [left arm pos (3), left arm quat (4), right arm pos (3), right arm quat (4),
+    #                 left hand joint pos (11), right hand joint pos (11)]
+    # idle_action = torch.tensor([
+    #     -0.22878, 0.2536, 1.0953, 0.5, 0.5, -0.5, 0.5,  # left arm pos + quat
+    #     0.22878, 0.2536, 1.0953, 0.5, 0.5, -0.5, 0.5,  # right arm pos + quat
+    #     *([0.0] * 22),  # both hands at zero (11 joints per hand)
+    # ])
+
+    def __post_init__(self):
+        """Post initialization."""
+        # general settings
+        self.decimation = 6  # with the 120 Hz sim step below, this gives a 20 Hz control rate (6 / 120 = 0.05 s)
+        self.episode_length_s = 20.0
+        # simulation settings
+        self.sim.dt = 1 / 120  # 120 Hz
+        self.sim.render_interval = 2
+
+        # Convert USD to URDF and change revolute joints to fixed
+        # temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf(
+        #     self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True
+        # )
+
+        # Set the URDF and mesh paths for the IK controller
+        self.actions.upper_body_ik.controller.urdf_path = "/tmp/urdf/GR1T2_fourier_hand_6dof.urdf"
+        # self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path
+        # self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path
+
+        # TODO: disabling teleop devices until they are supported again
+        # self.teleop_devices = DevicesCfg(
+        #     devices={
+        #         "handtracking": OpenXRDeviceCfg(
+        #             retargeters=[
+        #                 GR1T2RetargeterCfg(
+        #                     enable_visualization=True,
+        #                     # number of joints in both hands
+        #                     num_open_xr_hand_joints=2 * self.NUM_OPENXR_HAND_JOINTS,
+        #                     sim_device=self.sim.device,
+        #                     hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
+        #                 ),
+        #             ],
+        #             sim_device=self.sim.device,
+        #             xr_cfg=self.xr,
+        #         ),
+        #         "manusvive": ManusViveCfg(
+        #             retargeters=[
+        #                 GR1T2RetargeterCfg(
+        #                     enable_visualization=True,
+        #                     num_open_xr_hand_joints=2 * 26,
+        #                     sim_device=self.sim.device,
+        #                     hand_joint_names=self.actions.upper_body_ik.hand_joint_names,
+        #                 ),
+        #             ],
+        #             sim_device=self.sim.device,
+        #             xr_cfg=self.xr,
+        #         ),
+        #     }
+        # )
diff --git a/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py
new file mode 100644
index 00000000000..f51684b5c2a
--- /dev/null
+++ b/source/isaaclab_tasks/isaaclab_tasks/manager_based/manipulation/pick_place/pickplace_gr1t2_waist_enabled_env_cfg.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2022-2026, The Isaac Lab Project Developers (https://github.com/isaac-sim/IsaacLab/blob/main/CONTRIBUTORS.md).
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+import tempfile
+
+import isaaclab.controllers.utils as ControllerUtils
+from isaaclab.envs import ManagerBasedRLEnvCfg
+from isaaclab.utils import configclass
+
+from .pickplace_gr1t2_env_cfg import ActionsCfg, EventCfg, ObjectTableSceneCfg, ObservationsCfg, TerminationsCfg
+
+
+@configclass
+class PickPlaceGR1T2WaistEnabledEnvCfg(ManagerBasedRLEnvCfg):
+    """Configuration for the GR1T2 environment with the waist joints enabled for IK."""
+
+    # Scene settings
+    scene: ObjectTableSceneCfg = ObjectTableSceneCfg(num_envs=1, env_spacing=2.5, replicate_physics=True)
+    # Basic settings
+    observations: ObservationsCfg = ObservationsCfg()
+    actions: ActionsCfg = ActionsCfg()
+    # MDP settings
+    terminations: TerminationsCfg = TerminationsCfg()
+    events = EventCfg()
+
+    # Unused managers
+    commands = None
+    rewards = None
+    curriculum = None
+
+    # Temporary directory for URDF files
+    temp_urdf_dir = tempfile.gettempdir()
+
+    def __post_init__(self):
+        """Post initialization."""
+        # general settings
+        self.decimation = 6
+        self.episode_length_s = 20.0
+        # simulation settings
+        self.sim.dt = 1 / 120  # 120 Hz
+        self.sim.render_interval = 2
+
+        # Add the waist joints to the set of joints controlled by the Pink IK solver
+        waist_joint_names = ["waist_yaw_joint", "waist_pitch_joint", "waist_roll_joint"]
+        for joint_name in waist_joint_names:
+            self.actions.upper_body_ik.pink_controlled_joint_names.append(joint_name)
+
+        # Convert USD to URDF and change revolute joints to fixed
+        temp_urdf_output_path, temp_urdf_meshes_output_path = ControllerUtils.convert_usd_to_urdf(
+            self.scene.robot.spawn.usd_path, self.temp_urdf_dir, force_conversion=True
+        )
+
+        # Set the URDF and mesh paths for the IK controller
+        self.actions.upper_body_ik.controller.urdf_path = temp_urdf_output_path
+        self.actions.upper_body_ik.controller.mesh_path = temp_urdf_meshes_output_path
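Editorial note: a hedged usage sketch for the config above. Constructing it runs `__post_init__` (so the USD-to-URDF conversion must be able to resolve the robot asset), after which the waist joints are part of the IK-controlled joint set:

```python
# Sketch only; requires the GR1T2 USD asset to be resolvable for URDF conversion.
cfg = PickPlaceGR1T2WaistEnabledEnvCfg()
assert "waist_yaw_joint" in cfg.actions.upper_body_ik.pink_controlled_joint_names
```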