scripts/benchmarks/benchmark_non_rl.py (136 additions, 59 deletions)
@@ -12,10 +12,8 @@
import os
import sys
import time

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.")
parser = argparse.ArgumentParser(description="Benchmark non-RL environment.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
@@ -34,11 +32,22 @@
    help="Benchmarking backend options, defaults to OmniPerfKPIFile",
)
parser.add_argument("--output_folder", type=str, default=None, help="Output folder for the benchmark.")
parser.add_argument(
    "--kit",
    action="store_true",
    default=False,
    help="Enable Isaac Sim Kit and use isaacsim.benchmark.services. Default: False (uses standalone benchmark).",
)
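
# Example invocations (the task name is illustrative; any registered Isaac Lab task works):
#   ./isaaclab.sh -p scripts/benchmarks/benchmark_non_rl.py --task Isaac-Cartpole-v0 --num_envs 1024
#   ./isaaclab.sh -p scripts/benchmarks/benchmark_non_rl.py --task Isaac-Cartpole-v0 --num_envs 1024 --kit
# Without --kit the standalone benchmark backend is used; with --kit, Isaac Sim is launched and
# metrics are routed through isaacsim.benchmark.services.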

# Conditionally add AppLauncher args only if --kit is enabled
if "--kit" in sys.argv:
    from isaaclab.app import AppLauncher

    AppLauncher.add_app_launcher_args(parser)

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()

# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True
@@ -48,49 +57,68 @@

app_start_time_begin = time.perf_counter_ns()

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
# Conditionally launch Isaac Sim
simulation_app = None
app_launcher = None
if args_cli.kit:
    # Force Omniverse mode by setting environment variable
    # This ensures SimulationApp is launched even without explicit visualizers
    os.environ["LAUNCH_OV_APP"] = "1"

    # launch omniverse app
    app_launcher = AppLauncher(args_cli)
    simulation_app = app_launcher.app

app_start_time_end = time.perf_counter_ns()

"""Rest everything follows."""

# enable benchmarking extension
from isaacsim.core.utils.extensions import enable_extension
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))

enable_extension("isaacsim.benchmark.services")
# Import benchmark infrastructure based on kit flag
if args_cli.kit:
    # enable benchmarking extension
    from isaacsim.core.utils.extensions import enable_extension

# Set the benchmark settings according to the inputs
import carb
    enable_extension("isaacsim.benchmark.services")

settings = carb.settings.get_settings()
settings.set("/exts/isaacsim.benchmark.services/metrics/metrics_output_folder", args_cli.output_folder)
settings.set("/exts/isaacsim.benchmark.services/metrics/randomize_filename_prefix", True)
    # Set the benchmark settings according to the inputs
    import carb

from isaacsim.benchmark.services import BaseIsaacBenchmark
    settings = carb.settings.get_settings()
    settings.set("/exts/isaacsim.benchmark.services/metrics/metrics_output_folder", args_cli.output_folder)
    settings.set("/exts/isaacsim.benchmark.services/metrics/randomize_filename_prefix", True)
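    # These carb settings point the Kit benchmark extension at the requested output folder and
    # randomize metric filenames so repeated runs do not overwrite each other's results.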

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
    from isaacsim.benchmark.services import BaseIsaacBenchmark

from isaaclab.utils.timer import Timer
from scripts.benchmarks.utils import (
    get_isaaclab_version,
    get_mujoco_warp_version,
    get_newton_version,
    log_app_start_time,
    log_python_imports_time,
    log_runtime_step_times,
    log_scene_creation_time,
    log_simulation_start_time,
    log_task_start_time,
    log_total_start_time,
)
    from scripts.benchmarks.utils.benchmark_utils import create_kit_logging_functions, get_timer_value

    # Get all logging functions for kit mode
    log_funcs = create_kit_logging_functions()
else:
    # Use standalone benchmark services
    from scripts.benchmarks.utils.benchmark_utils import create_standalone_logging_functions, get_timer_value
    from scripts.benchmarks.utils.standalone_benchmark import StandaloneBenchmark

    # Get all logging functions for standalone mode
    log_funcs = create_standalone_logging_functions()
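
# Both factories are assumed to return the same dictionary keys, so everything below this
# point is agnostic to whether the Kit or standalone backend was selected.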

# Extract individual functions from the dictionary for easier use
get_isaaclab_version = log_funcs["get_isaaclab_version"]
get_mujoco_warp_version = log_funcs["get_mujoco_warp_version"]
get_newton_version = log_funcs["get_newton_version"]
log_app_start_time = log_funcs["log_app_start_time"]
log_python_imports_time = log_funcs["log_python_imports_time"]
log_task_start_time = log_funcs["log_task_start_time"]
log_scene_creation_time = log_funcs["log_scene_creation_time"]
log_simulation_start_time = log_funcs["log_simulation_start_time"]
log_total_start_time = log_funcs["log_total_start_time"]
log_runtime_step_times = log_funcs["log_runtime_step_times"]

imports_time_begin = time.perf_counter_ns()

import gymnasium as gym
import numpy as np
import os
import torch
from datetime import datetime

@@ -104,21 +132,40 @@


# Create the benchmark
benchmark = BaseIsaacBenchmark(
    benchmark_name="benchmark_non_rl",
    workflow_metadata={
        "metadata": [
            {"name": "task", "data": args_cli.task},
            {"name": "seed", "data": args_cli.seed},
            {"name": "num_envs", "data": args_cli.num_envs},
            {"name": "num_frames", "data": args_cli.num_frames},
            {"name": "Mujoco Warp Info", "data": get_mujoco_warp_version()},
            {"name": "Isaac Lab Info", "data": get_isaaclab_version()},
            {"name": "Newton Info", "data": get_newton_version()},
        ]
    },
    backend_type=args_cli.benchmark_backend,
)
if args_cli.kit:
    benchmark = BaseIsaacBenchmark(
        benchmark_name="benchmark_non_rl",
        workflow_metadata={
            "metadata": [
                {"name": "task", "data": args_cli.task},
                {"name": "seed", "data": args_cli.seed},
                {"name": "num_envs", "data": args_cli.num_envs},
                {"name": "num_frames", "data": args_cli.num_frames},
                {"name": "Mujoco Warp Info", "data": get_mujoco_warp_version()},
                {"name": "Isaac Lab Info", "data": get_isaaclab_version()},
                {"name": "Newton Info", "data": get_newton_version()},
            ]
        },
        backend_type=args_cli.benchmark_backend,
    )
else:
    benchmark = StandaloneBenchmark(
        benchmark_name="benchmark_non_rl",
        workflow_metadata={
            "metadata": [
                {"name": "task", "data": args_cli.task},
                {"name": "seed", "data": args_cli.seed},
                {"name": "num_envs", "data": args_cli.num_envs},
                {"name": "num_frames", "data": args_cli.num_frames},
                {"name": "Mujoco Warp Info", "data": get_mujoco_warp_version()},
                {"name": "Isaac Lab Info", "data": get_isaaclab_version()},
                {"name": "Newton Info", "data": get_newton_version()},
            ]
        },
        backend_type=args_cli.benchmark_backend,
        output_folder=args_cli.output_folder,
        randomize_filename_prefix=True,
    )
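    # StandaloneBenchmark mirrors BaseIsaacBenchmark's constructor; output_folder and
    # randomize_filename_prefix are passed directly because there is no carb settings
    # store to configure outside of Kit.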


@hydra_task_config(args_cli.task, None)
@@ -127,23 +174,27 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):

    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device
    if args_cli.kit and hasattr(args_cli, "device") and args_cli.device is not None:
        env_cfg.sim.device = args_cli.device

    # process distributed
    world_size = 1
    world_rank = 0
    if args_cli.distributed:
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
        world_size = int(os.getenv("WORLD_SIZE", 1))
        world_rank = app_launcher.global_rank
        if args_cli.kit:
            env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
            world_size = int(os.getenv("WORLD_SIZE", 1))
            world_rank = app_launcher.global_rank
        else:
            print("[WARNING] Distributed mode is only supported with the --kit flag.")
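            # Fall through with the single-process defaults (world_size=1, world_rank=0)
            # so the benchmark still runs.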

    task_startup_time_begin = time.perf_counter_ns()

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)
    # wrap for video recording
    if args_cli.video:
        log_root_path = os.path.abs(f"benchmark/{args_cli.task}")
        log_root_path = os.path.abspath(f"benchmark/{args_cli.task}")
        log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos"),
@@ -165,7 +216,30 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
    num_frames = 0
    # log frame times
    step_times = []
    while simulation_app.is_running():

    # Run loop depends on whether we're using kit or not
    if args_cli.kit:
        while simulation_app.is_running():
            while num_frames < args_cli.num_frames:
                # get upper and lower bounds of action space, sample actions randomly on this interval
                action_high = 1
                action_low = -1
                actions = (action_high - action_low) * torch.rand(
                    env.unwrapped.num_envs, env.unwrapped.single_action_space.shape[0], device=env.unwrapped.device
                ) - action_high
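                # (action_high - action_low) * U[0, 1) - action_high is uniform on
                # [action_low, action_high); subtracting action_high stands in for adding
                # action_low only because action_low == -action_high here.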

                # env stepping
                env_step_time_begin = time.perf_counter_ns()
                _ = env.step(actions)
                env_step_time_end = time.perf_counter_ns()
                step_times.append(env_step_time_end - env_step_time_begin)

                num_frames += 1

            # frame budget reached; exit the outer is_running() loop
            break
    else:
        # Standalone mode - simple loop
        while num_frames < args_cli.num_frames:
            # get upper and lower bounds of action space, sample actions randomly on this interval
            action_high = 1
@@ -182,9 +256,6 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):

            num_frames += 1

        # terminate
        break

    if world_rank == 0:
        benchmark.store_measurements()

@@ -203,8 +274,13 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
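    # perf_counter_ns() deltas are in nanoseconds; dividing by 1e6 converts them to milliseconds.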
    log_app_start_time(benchmark, (app_start_time_end - app_start_time_begin) / 1e6)
    log_python_imports_time(benchmark, (imports_time_end - imports_time_begin) / 1e6)
    log_task_start_time(benchmark, (task_startup_time_end - task_startup_time_begin) / 1e6)
    log_scene_creation_time(benchmark, Timer.get_timer_info("scene_creation") * 1000)
    log_simulation_start_time(benchmark, Timer.get_timer_info("simulation_start") * 1000)

    # Timer may not be available in standalone mode
    scene_creation_time = get_timer_value("scene_creation")
    simulation_start_time = get_timer_value("simulation_start")

    log_scene_creation_time(benchmark, scene_creation_time * 1000 if scene_creation_time is not None else None)
    log_simulation_start_time(benchmark, simulation_start_time * 1000 if simulation_start_time is not None else None)
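    # None is passed when a metric could not be measured (e.g. the Timer-based metrics when
    # running without Kit); the logging helpers are assumed to handle it gracefully.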
    log_total_start_time(benchmark, (task_startup_time_end - app_start_time_begin) / 1e6)
    log_runtime_step_times(benchmark, environment_step_times, compute_stats=True)

@@ -218,4 +294,5 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: dict):
    # run the main function
    main()
    # close sim app
    simulation_app.close()
    if simulation_app is not None:
        simulation_app.close()