From 0e844983f7e33cc64c895c30e7508b23ed5e4b96 Mon Sep 17 00:00:00 2001 From: UWLab BOT Date: Sun, 15 Mar 2026 18:17:18 -0700 Subject: [PATCH] Prepares pre-merge --- .vscode/.gitignore | 8 +- .vscode/extensions.json | 16 + .vscode/tasks.json | 26 + .vscode/tools/settings.template.json | 108 ++++ scripts/tutorials/00_sim/create_empty.py | 61 +++ scripts/tutorials/00_sim/launch_app.py | 96 ++++ scripts/tutorials/00_sim/log_time.py | 84 +++ .../tutorials/00_sim/set_rendering_mode.py | 83 +++ scripts/tutorials/00_sim/spawn_prims.py | 116 +++++ scripts/tutorials/01_assets/add_new_robot.py | 179 +++++++ .../tutorials/01_assets/run_articulation.py | 143 ++++++ .../01_assets/run_deformable_object.py | 168 ++++++ .../tutorials/01_assets/run_rigid_object.py | 148 ++++++ .../01_assets/run_surface_gripper.py | 183 +++++++ scripts/tutorials/02_scene/create_scene.py | 132 +++++ .../03_envs/create_cartpole_base_env.py | 174 +++++++ .../tutorials/03_envs/create_cube_base_env.py | 353 +++++++++++++ .../03_envs/create_quadruped_base_env.py | 245 +++++++++ .../03_envs/policy_inference_in_usd.py | 87 ++++ .../tutorials/03_envs/run_cartpole_rl_env.py | 79 +++ .../04_sensors/add_sensors_on_robot.py | 179 +++++++ .../04_sensors/run_frame_transformer.py | 185 +++++++ .../tutorials/04_sensors/run_ray_caster.py | 151 ++++++ .../04_sensors/run_ray_caster_camera.py | 184 +++++++ .../tutorials/04_sensors/run_usd_camera.py | 289 +++++++++++ .../tutorials/05_controllers/run_diff_ik.py | 212 ++++++++ scripts/tutorials/05_controllers/run_osc.py | 484 ++++++++++++++++++ 27 files changed, 4169 insertions(+), 4 deletions(-) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/tasks.json create mode 100644 .vscode/tools/settings.template.json create mode 100644 scripts/tutorials/00_sim/create_empty.py create mode 100644 scripts/tutorials/00_sim/launch_app.py create mode 100644 scripts/tutorials/00_sim/log_time.py create mode 100644 scripts/tutorials/00_sim/set_rendering_mode.py create 
mode 100644 scripts/tutorials/00_sim/spawn_prims.py create mode 100644 scripts/tutorials/01_assets/add_new_robot.py create mode 100644 scripts/tutorials/01_assets/run_articulation.py create mode 100644 scripts/tutorials/01_assets/run_deformable_object.py create mode 100644 scripts/tutorials/01_assets/run_rigid_object.py create mode 100644 scripts/tutorials/01_assets/run_surface_gripper.py create mode 100644 scripts/tutorials/02_scene/create_scene.py create mode 100644 scripts/tutorials/03_envs/create_cartpole_base_env.py create mode 100644 scripts/tutorials/03_envs/create_cube_base_env.py create mode 100644 scripts/tutorials/03_envs/create_quadruped_base_env.py create mode 100644 scripts/tutorials/03_envs/policy_inference_in_usd.py create mode 100644 scripts/tutorials/03_envs/run_cartpole_rl_env.py create mode 100644 scripts/tutorials/04_sensors/add_sensors_on_robot.py create mode 100644 scripts/tutorials/04_sensors/run_frame_transformer.py create mode 100644 scripts/tutorials/04_sensors/run_ray_caster.py create mode 100644 scripts/tutorials/04_sensors/run_ray_caster_camera.py create mode 100644 scripts/tutorials/04_sensors/run_usd_camera.py create mode 100644 scripts/tutorials/05_controllers/run_diff_ik.py create mode 100644 scripts/tutorials/05_controllers/run_osc.py diff --git a/.vscode/.gitignore b/.vscode/.gitignore index 290b1bd9..236a8546 100644 --- a/.vscode/.gitignore +++ b/.vscode/.gitignore @@ -1,10 +1,10 @@ +# Ignore all other files +.python.env +*.json + # Note: These files are kept for development purposes only. 
!tools/settings.template.json !tools/setup_vscode.py !extensions.json !launch.json !tasks.json - -# Ignore all other files -.python.env -*.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..aa5de15a --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,16 @@ +{ + // See http://go.microsoft.com/fwlink/?LinkId=827846 + // for the documentation about the extensions.json format + "recommendations": [ + "ms-vscode.cpptools", + "ms-python.python", + "ms-python.vscode-pylance", + "ban.spellright", + "streetsidesoftware.code-spell-checker", + "ms-iot.vscode-ros", + "ExecutableBookProject.myst-highlight", + "ms-python.black-formatter", + "ms-python.flake8", + "njpwerner.autodocstring" + ] +} diff --git a/.vscode/tasks.json b/.vscode/tasks.json new file mode 100644 index 00000000..598bb83f --- /dev/null +++ b/.vscode/tasks.json @@ -0,0 +1,26 @@ +{ + "version": "2.0.0", + "tasks": [ + { + "label": "setup_python_env", + "type": "shell", + "linux": { + "command": "${workspaceFolder}/../IsaacLab/isaaclab.sh -p ${workspaceFolder}/.vscode/tools/setup_vscode.py" + }, + "windows": { + "command": "${workspaceFolder}/../IsaacLab/isaaclab.bat -p ${workspaceFolder}/.vscode/tools/setup_vscode.py" + } + }, + { + // run formatter + "label": "run_formatter", + "type": "shell", + "linux": { + "command": "${workspaceFolder}/../IsaacLab/isaaclab.sh --format" + }, + "windows": { + "command": "${workspaceFolder}/../IsaacLab/isaaclab.bat --format" + } + } + ], +} diff --git a/.vscode/tools/settings.template.json b/.vscode/tools/settings.template.json new file mode 100644 index 00000000..13182194 --- /dev/null +++ b/.vscode/tools/settings.template.json @@ -0,0 +1,108 @@ +{ + "files.exclude": { + "**/.mypy_cache": true, + "**/__pycache__": true, + "**/*.egg-info": true + }, + "files.associations": { + "*.tpp": "cpp", + "*.kit": "toml", + "*.rst": "restructuredtext" + }, + "editor.rulers": [120], + + // files to be ignored by the linter + 
"files.watcherExclude": { + "**/.git/objects/**": true, + "**/.git/subtree-cache/**": true, + "**/node_modules/**": true, + "**/_isaac_sim/**": true, + "**/_compiler/**": true + }, + // Configuration for spelling checker + "spellright.language": [ + "en-US-10-1." + ], + "spellright.documentTypes": [ + "markdown", + "latex", + "plaintext", + "cpp", + "asciidoc", + "python", + "restructuredtext" + ], + "cSpell.words": [ + "literalinclude", + "linenos", + "instanceable", + "isaacSim", + "jacobians", + "pointcloud", + "ridgeback", + "rllib", + "robomimic", + "teleoperation", + "xform", + "numpy", + "flatcache", + "physx", + "dpad", + "gamepad", + "linspace", + "upsampled", + "downsampled", + "arange", + "discretization", + "trimesh", + "uninstanceable", + "***SUPPLIMENTARY_KEYWORDS***", + "configclass", + "isaaclab", + "***UWLAB_KEYWORDS***", + "teleop", + "octi", + "uwlab", + "realsense", + "rokoko", + "ikabsolute", + "ikabs", + "ikdelta", + "ikdel", + "***ROBOTS_KEYWORDS***", + "franka", + "tycho", + "hebi", + "xarm" + ], + // This enables python language server. Seems to work slightly better than jedi: + "python.languageServer": "Pylance", + // We use "black" as a formatter: + "python.formatting.provider": "black", + "python.formatting.blackArgs": ["--line-length", "120"], + // Use flake8 for linting + "python.linting.pylintEnabled": false, + "python.linting.flake8Enabled": true, + "python.linting.flake8Args": [ + "--max-line-length=120" + ], + // Use docstring generator + "autoDocstring.docstringFormat": "google", + "autoDocstring.guessTypes": true, + // Python environment path + // note: the default interpreter is overridden when user selects a workspace interpreter + // in the status bar. 
For example, the virtual environment python interpreter + "python.defaultInterpreterPath": "${workspaceFolder}/_isaac_sim/python.sh", + // ROS distribution + "ros.distro": "noetic", + // Language specific settings + "[python]": { + "editor.tabSize": 4 + }, + "[restructuredtext]": { + "editor.tabSize": 2 + }, + // Python extra paths + // Note: this is filled up when "./isaaclab.sh -i" is run + "python.analysis.extraPaths": [] +} diff --git a/scripts/tutorials/00_sim/create_empty.py b/scripts/tutorials/00_sim/create_empty.py new file mode 100644 index 00000000..e922acc1 --- /dev/null +++ b/scripts/tutorials/00_sim/create_empty.py @@ -0,0 +1,61 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to create a simple stage in Isaac Sim. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/00_sim/create_empty.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# create argparser +parser = argparse.ArgumentParser(description="Tutorial on creating an empty stage.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +from isaaclab.sim import SimulationCfg, SimulationContext + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) + + # Play the simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + + # Simulate physics + while simulation_app.is_running(): + # perform step + sim.step() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/00_sim/launch_app.py b/scripts/tutorials/00_sim/launch_app.py new file mode 100644 index 00000000..ddea3c7c --- /dev/null +++ b/scripts/tutorials/00_sim/launch_app.py @@ -0,0 +1,96 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to run IsaacSim via the AppLauncher + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/00_sim/launch_app.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# create argparser +parser = argparse.ArgumentParser(description="Tutorial on running IsaacSim via the AppLauncher.") +parser.add_argument("--size", type=float, default=1.0, help="Side-length of cuboid") +# SimulationApp arguments https://docs.omniverse.nvidia.com/py/isaacsim/source/isaacsim.simulation_app/docs/index.html?highlight=simulationapp#isaacsim.simulation_app.SimulationApp +parser.add_argument( + "--width", type=int, default=1280, help="Width of the viewport and generated images. Defaults to 1280" +) +parser.add_argument( + "--height", type=int, default=720, help="Height of the viewport and generated images. 
Defaults to 720" +) + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import isaaclab.sim as sim_utils + + +def design_scene(): + """Designs the scene by spawning ground plane, light, objects and meshes from usd files.""" + # Ground-plane + cfg_ground = sim_utils.GroundPlaneCfg() + cfg_ground.func("/World/defaultGroundPlane", cfg_ground) + + # spawn distant light + cfg_light_distant = sim_utils.DistantLightCfg( + intensity=3000.0, + color=(0.75, 0.75, 0.75), + ) + cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10)) + + # spawn a cuboid + cfg_cuboid = sim_utils.CuboidCfg( + size=[args_cli.size] * 3, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)), + ) + # Spawn cuboid, altering translation on the z-axis to scale to its size + cfg_cuboid.func("/World/Object", cfg_cuboid, translation=(0.0, 0.0, args_cli.size / 2)) + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5]) + + # Design scene by adding assets to it + design_scene() + + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + + # Simulate physics + while simulation_app.is_running(): + # perform step + sim.step() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/00_sim/log_time.py b/scripts/tutorials/00_sim/log_time.py new file mode 100644 index 00000000..4169c9fe --- /dev/null +++ b/scripts/tutorials/00_sim/log_time.py @@ -0,0 +1,84 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. 
(https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to generate log outputs while the simulation plays. +It accompanies the tutorial on docker usage. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/00_sim/log_time.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse +import os + +from isaaclab.app import AppLauncher + +# create argparser +parser = argparse.ArgumentParser(description="Tutorial on creating logs from within the docker container.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +from isaaclab.sim import SimulationCfg, SimulationContext + + +def main(): + """Main function.""" + # Specify that the logs must be in logs/docker_tutorial + log_dir_path = os.path.join("logs") + if not os.path.isdir(log_dir_path): + os.mkdir(log_dir_path) + # In the container, the absolute path will be + # /workspace/isaaclab/logs/docker_tutorial, because + # all python execution is done through /workspace/isaaclab/isaaclab.sh + # and the calling process' path will be /workspace/isaaclab + log_dir_path = os.path.abspath(os.path.join(log_dir_path, "docker_tutorial")) + if not os.path.isdir(log_dir_path): + os.mkdir(log_dir_path) + print(f"[INFO] Logging experiment to directory: {log_dir_path}") + + # Initialize the simulation context + sim_cfg = SimulationCfg(dt=0.01) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) + + # Play the simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + + # Prepare to count sim_time + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + + # Open logging file + with open(os.path.join(log_dir_path, "log.txt"), "w") as log_file: + # Simulate physics + while simulation_app.is_running(): + log_file.write(f"{sim_time}" + "\n") + # perform step + sim.step() + sim_time += sim_dt + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/00_sim/set_rendering_mode.py b/scripts/tutorials/00_sim/set_rendering_mode.py new file mode 100644 index 00000000..49125d91 --- /dev/null +++ b/scripts/tutorials/00_sim/set_rendering_mode.py @@ -0,0 +1,83 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to spawn prims into the scene. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/00_sim/set_rendering_mode.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# create argparser +parser = argparse.ArgumentParser( + description="Tutorial on viewing a warehouse scene with a given rendering mode preset." 
+) +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import isaaclab.sim as sim_utils +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + + +def main(): + """Main function.""" + + # rendering modes include performance, balanced, and quality + # note, the rendering_mode specified in the CLI argument (--rendering_mode) takes precedence over this Render Config setting + rendering_mode = "performance" + + # carb setting dictionary can include any rtx carb setting which will overwrite the native preset setting + carb_settings = {"rtx.reflections.enabled": True} + + # Initialize render config + render_cfg = sim_utils.RenderCfg( + rendering_mode=rendering_mode, + carb_settings=carb_settings, + ) + + # Initialize the simulation context with render config + sim_cfg = sim_utils.SimulationCfg(render=render_cfg) + sim = sim_utils.SimulationContext(sim_cfg) + + # Pose camera in the hospital lobby area + sim.set_camera_view([-11, -0.5, 2], [0, 0, 0.5]) + + # Load hospital scene + hospital_usd_path = f"{ISAAC_NUCLEUS_DIR}/Environments/Hospital/hospital.usd" + cfg = sim_utils.UsdFileCfg(usd_path=hospital_usd_path) + cfg.func("/Scene", cfg) + + # Play the simulator + sim.reset() + + # Now we are ready! + print("[INFO]: Setup complete...") + + # Run simulation and view scene + while simulation_app.is_running(): + sim.step() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/00_sim/spawn_prims.py b/scripts/tutorials/00_sim/spawn_prims.py new file mode 100644 index 00000000..0e1f02c3 --- /dev/null +++ b/scripts/tutorials/00_sim/spawn_prims.py @@ -0,0 +1,116 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md).
+# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to spawn prims into the scene. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/00_sim/spawn_prims.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# create argparser +parser = argparse.ArgumentParser(description="Tutorial on spawning prims into the scene.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import isaacsim.core.utils.prims as prim_utils + +import isaaclab.sim as sim_utils +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + + +def design_scene(): + """Designs the scene by spawning ground plane, light, objects and meshes from usd files.""" + # Ground-plane + cfg_ground = sim_utils.GroundPlaneCfg() + cfg_ground.func("/World/defaultGroundPlane", cfg_ground) + + # spawn distant light + cfg_light_distant = sim_utils.DistantLightCfg( + intensity=3000.0, + color=(0.75, 0.75, 0.75), + ) + cfg_light_distant.func("/World/lightDistant", cfg_light_distant, translation=(1, 0, 10)) + + # create a new xform prim for all objects to be spawned under + prim_utils.create_prim("/World/Objects", "Xform") + # spawn a red cone + cfg_cone = sim_utils.ConeCfg( + radius=0.15, + height=0.5, + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0)), + ) + cfg_cone.func("/World/Objects/Cone1", cfg_cone, translation=(-1.0, 1.0, 1.0)) + cfg_cone.func("/World/Objects/Cone2", cfg_cone, translation=(-1.0, -1.0, 1.0)) + + # spawn a green cone with colliders and rigid body + cfg_cone_rigid = sim_utils.ConeCfg( + radius=0.15, + height=0.5, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + 
collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0)), + ) + cfg_cone_rigid.func( + "/World/Objects/ConeRigid", cfg_cone_rigid, translation=(-0.2, 0.0, 2.0), orientation=(0.5, 0.0, 0.5, 0.0) + ) + + # spawn a blue cuboid with deformable body + cfg_cuboid_deformable = sim_utils.MeshCuboidCfg( + size=(0.2, 0.5, 0.2), + deformable_props=sim_utils.DeformableBodyPropertiesCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)), + physics_material=sim_utils.DeformableBodyMaterialCfg(), + ) + cfg_cuboid_deformable.func("/World/Objects/CuboidDeformable", cfg_cuboid_deformable, translation=(0.15, 0.0, 2.0)) + + # spawn a usd file of a table into the scene + cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/SeattleLabTable/table_instanceable.usd") + cfg.func("/World/Objects/Table", cfg, translation=(0.0, 0.0, 1.05)) + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.0, 0.0, 2.5], [-0.5, 0.0, 0.5]) + # Design scene + design_scene() + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + + # Simulate physics + while simulation_app.is_running(): + # perform step + sim.step() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/01_assets/add_new_robot.py b/scripts/tutorials/01_assets/add_new_robot.py new file mode 100644 index 00000000..43d31154 --- /dev/null +++ b/scripts/tutorials/01_assets/add_new_robot.py @@ -0,0 +1,179 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="This script demonstrates adding a custom robot to an Isaac Lab environment." +) +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +import numpy as np +import torch + +import isaaclab.sim as sim_utils +from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.assets import AssetBaseCfg +from isaaclab.assets.articulation import ArticulationCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + +JETBOT_CONFIG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/NVIDIA/Jetbot/jetbot.usd"), + actuators={"wheel_acts": ImplicitActuatorCfg(joint_names_expr=[".*"], damping=None, stiffness=None)}, +) + +DOFBOT_CONFIG = ArticulationCfg( + spawn=sim_utils.UsdFileCfg( + usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Yahboom/Dofbot/dofbot.usd", + rigid_props=sim_utils.RigidBodyPropertiesCfg( + disable_gravity=False, + max_depenetration_velocity=5.0, + ), + articulation_props=sim_utils.ArticulationRootPropertiesCfg( + enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0 + ), + ), + init_state=ArticulationCfg.InitialStateCfg( + joint_pos={ + "joint1": 0.0, + "joint2": 0.0, + "joint3": 0.0, + "joint4": 0.0, + }, + pos=(0.25, -0.25, 0.0), + ), + actuators={ + "front_joints": ImplicitActuatorCfg( + joint_names_expr=["joint[1-2]"], + effort_limit_sim=100.0, + velocity_limit_sim=100.0, + stiffness=10000.0, + damping=100.0, + ), + "joint3_act": ImplicitActuatorCfg( + 
joint_names_expr=["joint3"], + effort_limit_sim=100.0, + velocity_limit_sim=100.0, + stiffness=10000.0, + damping=100.0, + ), + "joint4_act": ImplicitActuatorCfg( + joint_names_expr=["joint4"], + effort_limit_sim=100.0, + velocity_limit_sim=100.0, + stiffness=10000.0, + damping=100.0, + ), + }, +) + + +class NewRobotsSceneCfg(InteractiveSceneCfg): + """Designs the scene.""" + + # Ground-plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + Jetbot = JETBOT_CONFIG.replace(prim_path="{ENV_REGEX_NS}/Jetbot") + Dofbot = DOFBOT_CONFIG.replace(prim_path="{ENV_REGEX_NS}/Dofbot") + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + while simulation_app.is_running(): + # reset + if count % 500 == 0: + # reset counters + count = 0 + # reset the scene entities to their initial positions offset by the environment origins + root_jetbot_state = scene["Jetbot"].data.default_root_state.clone() + root_jetbot_state[:, :3] += scene.env_origins + root_dofbot_state = scene["Dofbot"].data.default_root_state.clone() + root_dofbot_state[:, :3] += scene.env_origins + + # copy the default root state to the sim for the jetbot's orientation and velocity + scene["Jetbot"].write_root_pose_to_sim(root_jetbot_state[:, :7]) + scene["Jetbot"].write_root_velocity_to_sim(root_jetbot_state[:, 7:]) + scene["Dofbot"].write_root_pose_to_sim(root_dofbot_state[:, :7]) + scene["Dofbot"].write_root_velocity_to_sim(root_dofbot_state[:, 7:]) + + # copy the default joint states to the sim + joint_pos, joint_vel = ( + scene["Jetbot"].data.default_joint_pos.clone(), + scene["Jetbot"].data.default_joint_vel.clone(), + ) + scene["Jetbot"].write_joint_state_to_sim(joint_pos, joint_vel) + joint_pos, joint_vel = ( + 
scene["Dofbot"].data.default_joint_pos.clone(), + scene["Dofbot"].data.default_joint_vel.clone(), + ) + scene["Dofbot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting Jetbot and Dofbot state...") + + # drive around + if count % 100 < 75: + # Drive straight by setting equal wheel velocities + action = torch.Tensor([[10.0, 10.0]]) + else: + # Turn by applying different velocities + action = torch.Tensor([[5.0, -5.0]]) + + scene["Jetbot"].set_joint_velocity_target(action) + + # wave + wave_action = scene["Dofbot"].data.default_joint_pos + wave_action[:, 0:4] = 0.25 * np.sin(2 * np.pi * 0.5 * sim_time) + scene["Dofbot"].set_joint_position_target(wave_action) + + scene.write_data_to_sim() + sim.step() + sim_time += sim_dt + count += 1 + scene.update(sim_dt) + + +def main(): + """Main function.""" + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + sim.set_camera_view([3.5, 0.0, 3.2], [0.0, 0.0, 0.5]) + # Design scene + scene_cfg = NewRobotsSceneCfg(args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts/tutorials/01_assets/run_articulation.py b/scripts/tutorials/01_assets/run_articulation.py new file mode 100644 index 00000000..869ef033 --- /dev/null +++ b/scripts/tutorials/01_assets/run_articulation.py @@ -0,0 +1,143 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to spawn a cart-pole and interact with it. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/01_assets/run_articulation.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with an articulation.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaacsim.core.utils.prims as prim_utils + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation +from isaaclab.sim import SimulationContext + +## +# Pre-defined configs +## +from isaaclab_assets import CARTPOLE_CFG # isort:skip + + +def design_scene() -> tuple[dict, list[list[float]]]: + """Designs the scene.""" + # Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # Lights + cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + cfg.func("/World/Light", cfg) + + # Create separate groups called "Origin1", "Origin2" + # Each group will have a robot in it + origins = [[0.0, 0.0, 0.0], [-1.0, 0.0, 0.0]] + # Origin 1 + prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0]) + # Origin 2 + prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1]) + + # Articulation + cartpole_cfg = CARTPOLE_CFG.copy() + cartpole_cfg.prim_path = "/World/Origin.*/Robot" + cartpole = Articulation(cfg=cartpole_cfg) + + # return the scene information + scene_entities = {"cartpole": cartpole} + return scene_entities, origins + + +def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, Articulation], origins: torch.Tensor): + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. 
In general, it is better to access the entities directly from + # the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial. + robot = entities["cartpole"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + count = 0 + # Simulation loop + while simulation_app.is_running(): + # Reset + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = robot.data.default_root_state.clone() + root_state[:, :3] += origins + robot.write_root_pose_to_sim(root_state[:, :7]) + robot.write_root_velocity_to_sim(root_state[:, 7:]) + # set joint positions with some noise + joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone() + joint_pos += torch.rand_like(joint_pos) * 0.1 + robot.write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + robot.reset() + print("[INFO]: Resetting robot state...") + # Apply random action + # -- generate random joint efforts + efforts = torch.randn_like(robot.data.joint_pos) * 5.0 + # -- apply action to the robot + robot.set_joint_effort_target(efforts) + # -- write data to sim + robot.write_data_to_sim() + # Perform step + sim.step() + # Increment counter + count += 1 + # Update buffers + robot.update(sim_dt) + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) + # Design scene + scene_entities, scene_origins = design_scene() + scene_origins = torch.tensor(scene_origins, device=sim.device) + # Play the simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene_entities, scene_origins) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/01_assets/run_deformable_object.py b/scripts/tutorials/01_assets/run_deformable_object.py new file mode 100644 index 00000000..ca7b841d --- /dev/null +++ b/scripts/tutorials/01_assets/run_deformable_object.py @@ -0,0 +1,168 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to work with the deformable object and interact with it. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/01_assets/run_deformable_object.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on interacting with a deformable object.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaacsim.core.utils.prims as prim_utils + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.assets import DeformableObject, DeformableObjectCfg +from isaaclab.sim import SimulationContext + + +def design_scene(): + """Designs the scene.""" + # Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # Lights + cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8)) + cfg.func("/World/Light", cfg) + + # Create separate groups called "Origin1", "Origin2", "Origin3" + # Each group will have a robot in it + 
origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]] + for i, origin in enumerate(origins): + prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin) + + # Deformable Object + cfg = DeformableObjectCfg( + prim_path="/World/Origin.*/Cube", + spawn=sim_utils.MeshCuboidCfg( + size=(0.2, 0.2, 0.2), + deformable_props=sim_utils.DeformableBodyPropertiesCfg(rest_offset=0.0, contact_offset=0.001), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.1, 0.0)), + physics_material=sim_utils.DeformableBodyMaterialCfg(poissons_ratio=0.4, youngs_modulus=1e5), + ), + init_state=DeformableObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 1.0)), + debug_vis=True, + ) + cube_object = DeformableObject(cfg=cfg) + + # return the scene information + scene_entities = {"cube_object": cube_object} + return scene_entities, origins + + +def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, DeformableObject], origins: torch.Tensor): + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. In general, it is better to access the entities directly from + # the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial. 
+ cube_object = entities["cube_object"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Nodal kinematic targets of the deformable bodies + nodal_kinematic_target = cube_object.data.nodal_kinematic_target.clone() + + # Simulate physics + while simulation_app.is_running(): + # reset + if count % 250 == 0: + # reset counters + sim_time = 0.0 + count = 0 + + # reset the nodal state of the object + nodal_state = cube_object.data.default_nodal_state_w.clone() + # apply random pose to the object + pos_w = torch.rand(cube_object.num_instances, 3, device=sim.device) * 0.1 + origins + quat_w = math_utils.random_orientation(cube_object.num_instances, device=sim.device) + nodal_state[..., :3] = cube_object.transform_nodal_pos(nodal_state[..., :3], pos_w, quat_w) + + # write nodal state to simulation + cube_object.write_nodal_state_to_sim(nodal_state) + + # Write the nodal state to the kinematic target and free all vertices + nodal_kinematic_target[..., :3] = nodal_state[..., :3] + nodal_kinematic_target[..., 3] = 1.0 + cube_object.write_nodal_kinematic_target_to_sim(nodal_kinematic_target) + + # reset buffers + cube_object.reset() + + print("----------------------------------------") + print("[INFO]: Resetting object state...") + + # update the kinematic target for cubes at index 0 and 3 + # we slightly move the cube in the z-direction by picking the vertex at index 0 + nodal_kinematic_target[[0, 3], 0, 2] += 0.001 + # set vertex at index 0 to be kinematically constrained + # 0: constrained, 1: free + nodal_kinematic_target[[0, 3], 0, 3] = 0.0 + # write kinematic target to simulation + cube_object.write_nodal_kinematic_target_to_sim(nodal_kinematic_target) + + # write internal data to simulation + cube_object.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + cube_object.update(sim_dt) + # print the root position + if count % 50 == 0: + print(f"Root 
position (in world): {cube_object.data.root_pos_w[:, :3]}") + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.0, 0.0, 1.0], target=[0.0, 0.0, 0.5]) + # Design scene + scene_entities, scene_origins = design_scene() + scene_origins = torch.tensor(scene_origins, device=sim.device) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene_entities, scene_origins) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/01_assets/run_rigid_object.py b/scripts/tutorials/01_assets/run_rigid_object.py new file mode 100644 index 00000000..0ab19de7 --- /dev/null +++ b/scripts/tutorials/01_assets/run_rigid_object.py @@ -0,0 +1,148 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to create a rigid object and interact with it. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/01_assets/run_rigid_object.py + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with a rigid object.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaacsim.core.utils.prims as prim_utils + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.sim import SimulationContext + + +def design_scene(): + """Designs the scene.""" + # Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # Lights + cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.8, 0.8, 0.8)) + cfg.func("/World/Light", cfg) + + # Create separate groups called "Origin1", "Origin2", "Origin3" + # Each group will have a robot in it + origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]] + for i, origin in enumerate(origins): + prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin) + + # Rigid Object + cone_cfg = RigidObjectCfg( + prim_path="/World/Origin.*/Cone", + spawn=sim_utils.ConeCfg( + radius=0.1, + height=0.2, + rigid_props=sim_utils.RigidBodyPropertiesCfg(), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + collision_props=sim_utils.CollisionPropertiesCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 1.0, 0.0), metallic=0.2), + ), + init_state=RigidObjectCfg.InitialStateCfg(), + ) + cone_object = RigidObject(cfg=cone_cfg) + + # return the scene information + scene_entities = {"cone": cone_object} + return 
scene_entities, origins + + +def run_simulator(sim: sim_utils.SimulationContext, entities: dict[str, RigidObject], origins: torch.Tensor): + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. In general, it is better to access the entities directly from + # the dictionary. This dictionary is replaced by the InteractiveScene class in the next tutorial. + cone_object = entities["cone"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + # Simulate physics + while simulation_app.is_running(): + # reset + if count % 250 == 0: + # reset counters + sim_time = 0.0 + count = 0 + # reset root state + root_state = cone_object.data.default_root_state.clone() + # sample a random position on a cylinder around the origins + root_state[:, :3] += origins + root_state[:, :3] += math_utils.sample_cylinder( + radius=0.1, h_range=(0.25, 0.5), size=cone_object.num_instances, device=cone_object.device + ) + # write root state to simulation + cone_object.write_root_pose_to_sim(root_state[:, :7]) + cone_object.write_root_velocity_to_sim(root_state[:, 7:]) + # reset buffers + cone_object.reset() + print("----------------------------------------") + print("[INFO]: Resetting object state...") + # apply sim data + cone_object.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + cone_object.update(sim_dt) + # print the root position + if count % 50 == 0: + print(f"Root position (in world): {cone_object.data.root_pos_w}") + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[1.5, 0.0, 1.0], target=[0.0, 0.0, 0.0]) + # Design scene + scene_entities, scene_origins = design_scene() + scene_origins = torch.tensor(scene_origins, device=sim.device) + # Play the simulator + sim.reset() + # 
Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene_entities, scene_origins) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/01_assets/run_surface_gripper.py b/scripts/tutorials/01_assets/run_surface_gripper.py new file mode 100644 index 00000000..50a01431 --- /dev/null +++ b/scripts/tutorials/01_assets/run_surface_gripper.py @@ -0,0 +1,183 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to spawn a pick-and-place robot equipped with a surface gripper and interact with it. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/01_assets/run_surface_gripper.py --device=cpu + +When running this script make sure the --device flag is set to cpu. This is because the surface gripper is +currently only supported on the CPU. 
+""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on spawning and interacting with a Surface Gripper.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaacsim.core.utils.prims as prim_utils + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation, SurfaceGripper, SurfaceGripperCfg +from isaaclab.sim import SimulationContext + +## +# Pre-defined configs +## +from isaaclab_assets import PICK_AND_PLACE_CFG # isort:skip + + +def design_scene(): + """Designs the scene.""" + # Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # Lights + cfg = sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + cfg.func("/World/Light", cfg) + + # Create separate groups called "Origin1", "Origin2" + # Each group will have a robot in it + origins = [[2.75, 0.0, 0.0], [-2.75, 0.0, 0.0]] + # Origin 1 + prim_utils.create_prim("/World/Origin1", "Xform", translation=origins[0]) + # Origin 2 + prim_utils.create_prim("/World/Origin2", "Xform", translation=origins[1]) + + # Articulation: First we define the robot config + pick_and_place_robot_cfg = PICK_AND_PLACE_CFG.copy() + pick_and_place_robot_cfg.prim_path = "/World/Origin.*/Robot" + pick_and_place_robot = Articulation(cfg=pick_and_place_robot_cfg) + + # Surface Gripper: Next we define the surface gripper config + surface_gripper_cfg = SurfaceGripperCfg() + # We need to tell the View which prim to use for the surface gripper + surface_gripper_cfg.prim_path = "/World/Origin.*/Robot/picker_head/SurfaceGripper" + # We can then set different parameters for the surface gripper, note that if 
these parameters are not set, + # the View will try to read them from the prim. + surface_gripper_cfg.max_grip_distance = 0.1 # [m] (Maximum distance at which the gripper can grasp an object) + surface_gripper_cfg.shear_force_limit = 500.0 # [N] (Force limit in the direction perpendicular to the gripper's axis) + surface_gripper_cfg.coaxial_force_limit = 500.0 # [N] (Force limit in the direction of the gripper's axis) + surface_gripper_cfg.retry_interval = 0.1 # seconds (Time the gripper will stay in a grasping state) + # We can now spawn the surface gripper + surface_gripper = SurfaceGripper(cfg=surface_gripper_cfg) + + # return the scene information + scene_entities = {"pick_and_place_robot": pick_and_place_robot, "surface_gripper": surface_gripper} + return scene_entities, origins + + +def run_simulator( + sim: sim_utils.SimulationContext, entities: dict[str, Articulation | SurfaceGripper], origins: torch.Tensor +): + """Runs the simulation loop.""" + # Extract scene entities + robot: Articulation = entities["pick_and_place_robot"] + surface_gripper: SurfaceGripper = entities["surface_gripper"] + + # Define simulation stepping + sim_dt = sim.get_physics_dt() + count = 0 + # Simulation loop + while simulation_app.is_running(): + # Reset + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = robot.data.default_root_state.clone() + root_state[:, :3] += origins + robot.write_root_pose_to_sim(root_state[:, :7]) + robot.write_root_velocity_to_sim(root_state[:, 7:]) + # set joint positions with some noise + joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone() + joint_pos += torch.rand_like(joint_pos) * 0.1 + robot.write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + 
robot.reset() + print("[INFO]: Resetting robot state...") + # Opens the gripper and makes sure the gripper is in the open state + surface_gripper.reset() + print("[INFO]: Resetting gripper state...") + + # Sample a random command between -1 and 1. + gripper_commands = torch.rand(surface_gripper.num_instances) * 2.0 - 1.0 + # The gripper behavior is as follows: + # -1 < command < -0.3 --> Gripper is Opening + # -0.3 < command < 0.3 --> Gripper is Idle + # 0.3 < command < 1 --> Gripper is Closing + print(f"[INFO]: Gripper commands: {gripper_commands}") + mapped_commands = [ + "Opening" if command < -0.3 else "Closing" if command > 0.3 else "Idle" for command in gripper_commands + ] + print(f"[INFO]: Mapped commands: {mapped_commands}") + # Set the gripper command + surface_gripper.set_grippers_command(gripper_commands) + # Write data to sim + surface_gripper.write_data_to_sim() + # Perform step + sim.step() + # Increment counter + count += 1 + # Read the gripper state from the simulation + surface_gripper.update(sim_dt) + # Read the gripper state from the buffer + surface_gripper_state = surface_gripper.state + # The gripper state is a list of integers that can be mapped to the following: + # -1 --> Open + # 0 --> Closing + # 1 --> Closed + # Print the gripper state + print(f"[INFO]: Gripper state: {surface_gripper_state}") + mapped_commands = [ + "Open" if state == -1 else "Closing" if state == 0 else "Closed" for state in surface_gripper_state.tolist() + ] + print(f"[INFO]: Mapped commands: {mapped_commands}") + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.75, 7.5, 10.0], [2.75, 0.0, 0.0]) + # Design scene + scene_entities, scene_origins = design_scene() + scene_origins = torch.tensor(scene_origins, device=sim.device) + # Play the simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene_entities, scene_origins) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/02_scene/create_scene.py b/scripts/tutorials/02_scene/create_scene.py new file mode 100644 index 00000000..57d91a3e --- /dev/null +++ b/scripts/tutorials/02_scene/create_scene.py @@ -0,0 +1,132 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationContext +from isaaclab.utils import configclass + +## +# Pre-defined configs +## +from isaaclab_assets import CARTPOLE_CFG # isort:skip + + +@configclass +class CartpoleSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", 
spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # articulation + cartpole: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Runs the simulation loop.""" + # Extract scene entities + # note: we only do this here for readability. + robot = scene["cartpole"] + # Define simulation stepping + sim_dt = sim.get_physics_dt() + count = 0 + # Simulation loop + while simulation_app.is_running(): + # Reset + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = robot.data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + robot.write_root_pose_to_sim(root_state[:, :7]) + robot.write_root_velocity_to_sim(root_state[:, 7:]) + # set joint positions with some noise + joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone() + joint_pos += torch.rand_like(joint_pos) * 0.1 + robot.write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply random action + # -- generate random joint efforts + efforts = torch.randn_like(robot.data.joint_pos) * 5.0 + # -- apply action to the robot + robot.set_joint_effort_target(efforts) + # -- write data to sim + scene.write_data_to_sim() + # Perform step + sim.step() + # Increment counter + count += 1 + # Update buffers + scene.update(sim_dt) + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + 
sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0]) + # Design scene + scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/03_envs/create_cartpole_base_env.py b/scripts/tutorials/03_envs/create_cartpole_base_env.py new file mode 100644 index 00000000..b48357ee --- /dev/null +++ b/scripts/tutorials/03_envs/create_cartpole_base_env.py @@ -0,0 +1,174 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to create a simple environment with a cartpole. It combines the concepts of +scene, action, observation and event managers to create an environment. + +.. 
code-block:: bash + + ./isaaclab.sh -p scripts/tutorials/03_envs/create_cartpole_base_env.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on creating a cartpole base environment.") +parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import math +import torch + +import isaaclab.envs.mdp as mdp +from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.utils import configclass + +from isaaclab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpoleSceneCfg + + +@configclass +class ActionsCfg: + """Action specifications for the environment.""" + + joint_efforts = mdp.JointEffortActionCfg(asset_name="robot", joint_names=["slider_to_cart"], scale=5.0) + + +@configclass +class ObservationsCfg: + """Observation specifications for the environment.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # observation terms (order preserved) + joint_pos_rel = ObsTerm(func=mdp.joint_pos_rel) + joint_vel_rel = ObsTerm(func=mdp.joint_vel_rel) + + def __post_init__(self) -> None: + self.enable_corruption = False + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + # on startup + add_pole_mass = EventTerm( + 
func=mdp.randomize_rigid_body_mass, + mode="startup", + params={ + "asset_cfg": SceneEntityCfg("robot", body_names=["pole"]), + "mass_distribution_params": (0.1, 0.5), + "operation": "add", + }, + ) + + # on reset + reset_cart_position = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=["slider_to_cart"]), + "position_range": (-1.0, 1.0), + "velocity_range": (-0.1, 0.1), + }, + ) + + reset_pole_position = EventTerm( + func=mdp.reset_joints_by_offset, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot", joint_names=["cart_to_pole"]), + "position_range": (-0.125 * math.pi, 0.125 * math.pi), + "velocity_range": (-0.01 * math.pi, 0.01 * math.pi), + }, + ) + + +@configclass +class CartpoleEnvCfg(ManagerBasedEnvCfg): + """Configuration for the cartpole environment.""" + + # Scene settings + scene = CartpoleSceneCfg(num_envs=1024, env_spacing=2.5) + # Basic settings + observations = ObservationsCfg() + actions = ActionsCfg() + events = EventCfg() + + def __post_init__(self): + """Post initialization.""" + # viewer settings + self.viewer.eye = [4.5, 0.0, 6.0] + self.viewer.lookat = [0.0, 0.0, 2.0] + # step settings + self.decimation = 4 # env step every 4 sim steps: 200Hz / 4 = 50Hz + # simulation settings + self.sim.dt = 0.005 # sim step every 5ms: 200Hz + + +def main(): + """Main function.""" + # parse the arguments + env_cfg = CartpoleEnvCfg() + env_cfg.scene.num_envs = args_cli.num_envs + env_cfg.sim.device = args_cli.device + # setup base environment + env = ManagerBasedEnv(cfg=env_cfg) + + # simulate physics + count = 0 + while simulation_app.is_running(): + with torch.inference_mode(): + # reset + if count % 300 == 0: + count = 0 + env.reset() + print("-" * 80) + print("[INFO]: Resetting environment...") + # sample random actions + joint_efforts = torch.randn_like(env.action_manager.action) + # step the environment + obs, _ = env.step(joint_efforts) + # print current orientation 
of pole + print("[Env 0]: Pole joint: ", obs["policy"][0][1].item()) + # update counter + count += 1 + + # close the environment + env.close() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/03_envs/create_cube_base_env.py b/scripts/tutorials/03_envs/create_cube_base_env.py new file mode 100644 index 00000000..803d627f --- /dev/null +++ b/scripts/tutorials/03_envs/create_cube_base_env.py @@ -0,0 +1,353 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script creates a simple environment with a floating cube. The cube is controlled by a PD +controller to track an arbitrary target position. + +While going through this tutorial, we recommend you to pay attention to how a custom action term +is defined. The action term is responsible for processing the raw actions and applying them to the +scene entities. + +We also define an event term called 'randomize_scale' that randomizes the scale of +the cube. This event term has the mode 'prestartup', which means that it is applied on the USD stage +before the simulation starts. Additionally, the flag 'replicate_physics' is set to False, +which means that the cube is not replicated across multiple environments but rather each +environment gets its own cube instance. + +The rest of the environment is similar to the previous tutorials. + +.. 
code-block:: bash + + # Run the script + ./isaaclab.sh -p scripts/tutorials/03_envs/create_cube_base_env.py --num_envs 32 + +""" + +from __future__ import annotations + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on creating a floating cube environment.") +parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.envs.mdp as mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg, RigidObject, RigidObjectCfg +from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg +from isaaclab.managers import ActionTerm, ActionTermCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass + +## +# Custom action term +## + + +class CubeActionTerm(ActionTerm): + """Simple action term that implements a PD controller to track a target position. + + The action term is applied to the cube asset. It involves two steps: + + 1. **Process the raw actions**: Typically, this includes any transformations of the raw actions + that are required to map them to the desired space. This is called once per environment step. + 2. **Apply the processed actions**: This step applies the processed actions to the asset. + It is called once per simulation step. 
+ + In this case, the action term simply applies the raw actions to the cube asset. The raw actions + are the desired target positions of the cube in the environment frame. The pre-processing step + simply copies the raw actions to the processed actions as no additional processing is required. + The processed actions are then applied to the cube asset by implementing a PD controller to + track the target position. + """ + + _asset: RigidObject + """The articulation asset on which the action term is applied.""" + + def __init__(self, cfg: CubeActionTermCfg, env: ManagerBasedEnv): + # call super constructor + super().__init__(cfg, env) + # create buffers + self._raw_actions = torch.zeros(env.num_envs, 3, device=self.device) + self._processed_actions = torch.zeros(env.num_envs, 3, device=self.device) + self._vel_command = torch.zeros(self.num_envs, 6, device=self.device) + # gains of controller + self.p_gain = cfg.p_gain + self.d_gain = cfg.d_gain + + """ + Properties. + """ + + @property + def action_dim(self) -> int: + return self._raw_actions.shape[1] + + @property + def raw_actions(self) -> torch.Tensor: + return self._raw_actions + + @property + def processed_actions(self) -> torch.Tensor: + return self._processed_actions + + """ + Operations + """ + + def process_actions(self, actions: torch.Tensor): + # store the raw actions + self._raw_actions[:] = actions + # no-processing of actions + self._processed_actions[:] = self._raw_actions[:] + + def apply_actions(self): + # implement a PD controller to track the target position + pos_error = self._processed_actions - (self._asset.data.root_pos_w - self._env.scene.env_origins) + vel_error = -self._asset.data.root_lin_vel_w + # set velocity targets + self._vel_command[:, :3] = self.p_gain * pos_error + self.d_gain * vel_error + self._asset.write_root_velocity_to_sim(self._vel_command) + + +@configclass +class CubeActionTermCfg(ActionTermCfg): + """Configuration for the cube action term.""" + + class_type: type = 
CubeActionTerm + """The class corresponding to the action term.""" + + p_gain: float = 5.0 + """Proportional gain of the PD controller.""" + d_gain: float = 0.5 + """Derivative gain of the PD controller.""" + + +## +# Custom observation term +## + + +def base_position(env: ManagerBasedEnv, asset_cfg: SceneEntityCfg) -> torch.Tensor: + """Root position of the asset in the environment frame.""" + # extract the used quantities (to enable type-hinting) + asset: RigidObject = env.scene[asset_cfg.name] + return asset.data.root_pos_w - env.scene.env_origins + + +## +# Scene definition +## + + +@configclass +class MySceneCfg(InteractiveSceneCfg): + """Example scene configuration. + + The scene comprises of a ground plane, light source and floating cubes (gravity disabled). + """ + + # add terrain + terrain = TerrainImporterCfg(prim_path="/World/ground", terrain_type="plane", debug_vis=False) + + # add cube + cube: RigidObjectCfg = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/cube", + spawn=sim_utils.CuboidCfg( + size=(0.2, 0.2, 0.2), + rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0, disable_gravity=True), + mass_props=sim_utils.MassPropertiesCfg(mass=1.0), + physics_material=sim_utils.RigidBodyMaterialCfg(), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)), + ), + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 5)), + ) + + # lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DomeLightCfg(color=(0.75, 0.75, 0.75), intensity=2000.0), + ) + + +## +# Environment settings +## + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_pos = CubeActionTermCfg(asset_name="cube") + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # cube position + position = ObsTerm(func=base_position, params={"asset_cfg": SceneEntityCfg("cube")}) + + def 
__post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + # This event term resets the base position of the cube. + # The mode is set to 'reset', which means that the base position is reset whenever + # the environment instance is reset (because of terminations defined in 'TerminationCfg'). + reset_base = EventTerm( + func=mdp.reset_root_state_uniform, + mode="reset", + params={ + "pose_range": {"x": (-0.5, 0.5), "y": (-0.5, 0.5), "yaw": (-3.14, 3.14)}, + "velocity_range": { + "x": (-0.5, 0.5), + "y": (-0.5, 0.5), + "z": (-0.5, 0.5), + }, + "asset_cfg": SceneEntityCfg("cube"), + }, + ) + + # This event term randomizes the scale of the cube. + # The mode is set to 'prestartup', which means that the scale is randomized on the USD stage before the + # simulation starts. + # Note: USD-level randomizations require the flag 'replicate_physics' to be set to False. + randomize_scale = EventTerm( + func=mdp.randomize_rigid_body_scale, + mode="prestartup", + params={ + "scale_range": {"x": (0.5, 1.5), "y": (0.5, 1.5), "z": (0.5, 1.5)}, + "asset_cfg": SceneEntityCfg("cube"), + }, + ) + + # This event term randomizes the visual color of the cube. + # Similar to the scale randomization, this is also a USD-level randomization and requires the flag + # 'replicate_physics' to be set to False. 
+ randomize_color = EventTerm( + func=mdp.randomize_visual_color, + mode="prestartup", + params={ + "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)}, + "asset_cfg": SceneEntityCfg("cube"), + "mesh_name": "geometry/mesh", + "event_name": "rep_cube_randomize_color", + }, + ) + + +## +# Environment configuration +## + + +@configclass +class CubeEnvCfg(ManagerBasedEnvCfg): + """Configuration for the cube position-tracking environment.""" + + # Scene settings + # The flag 'replicate_physics' is set to False, which means that the cube is not replicated + # across multiple environments but rather each environment gets its own cube instance. + # This allows modifying the cube's properties independently for each environment. + scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5, replicate_physics=False) + + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + events: EventCfg = EventCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 2 + # simulation settings + self.sim.dt = 0.01 + self.sim.physics_material = self.scene.terrain.physics_material + self.sim.render_interval = 2 # render interval should be a multiple of decimation + self.sim.device = args_cli.device + # viewer settings + self.viewer.eye = (5.0, 5.0, 5.0) + self.viewer.lookat = (0.0, 0.0, 2.0) + + +def main(): + """Main function.""" + + # setup base environment + env_cfg = CubeEnvCfg() + env = ManagerBasedEnv(cfg=env_cfg) + + # setup target position commands + target_position = torch.rand(env.num_envs, 3, device=env.device) * 2 + target_position[:, 2] += 2.0 + # offset all targets so that they move to the world origin + target_position -= env.scene.env_origins + + # simulate physics + count = 0 + obs, _ = env.reset() + while simulation_app.is_running(): + with torch.inference_mode(): + # reset + if count % 300 == 0: + count = 0 + obs, _ = env.reset() + print("-" * 
80) + print("[INFO]: Resetting environment...") + # step env + obs, _ = env.step(target_position) + # print the L2-norm of the position error between target and current position + error = torch.norm(obs["policy"] - target_position).mean().item() + print(f"[Step: {count:04d}]: Mean position error: {error:.4f}") + # update counter + count += 1 + + # close the environment + env.close() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/03_envs/create_quadruped_base_env.py b/scripts/tutorials/03_envs/create_quadruped_base_env.py new file mode 100644 index 00000000..5aac02a8 --- /dev/null +++ b/scripts/tutorials/03_envs/create_quadruped_base_env.py @@ -0,0 +1,245 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates the environment for a quadruped robot with height-scan sensor. + +In this example, we use a locomotion policy to control the robot. The robot is commanded to +move forward at a constant velocity. The height-scan sensor is used to detect the height of +the terrain. + +.. 
code-block:: bash + + # Run the script + ./isaaclab.sh -p scripts/tutorials/03_envs/create_quadruped_base_env.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on creating a quadruped base environment.") +parser.add_argument("--num_envs", type=int, default=64, help="Number of environments to spawn.") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.envs.mdp as mdp +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.envs import ManagerBasedEnv, ManagerBasedEnvCfg +from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.sensors import RayCasterCfg, patterns +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils import configclass +from isaaclab.utils.assets import ISAACLAB_NUCLEUS_DIR, check_file_path, read_file +from isaaclab.utils.noise import AdditiveUniformNoiseCfg as Unoise + +## +# Pre-defined configs +## +from isaaclab.terrains.config.rough import ROUGH_TERRAINS_CFG # isort: skip +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort: skip + + +## +# Custom observation terms +## + + +def constant_commands(env: ManagerBasedEnv) -> torch.Tensor: + """The generated command from the command generator.""" + return torch.tensor([[1, 0, 0]], device=env.device).repeat(env.num_envs, 1) + + +## +# Scene definition +## + + +@configclass +class 
MySceneCfg(InteractiveSceneCfg): + """Example scene configuration.""" + + # add terrain + terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="generator", + terrain_generator=ROUGH_TERRAINS_CFG, + max_init_terrain_level=5, + collision_group=-1, + physics_material=sim_utils.RigidBodyMaterialCfg( + friction_combine_mode="multiply", + restitution_combine_mode="multiply", + static_friction=1.0, + dynamic_friction=1.0, + ), + debug_vis=False, + ) + + # add robot + robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # sensors + height_scanner = RayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)), + ray_alignment="yaw", + pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]), + debug_vis=True, + mesh_prim_paths=["/World/ground"], + ) + + # lights + light = AssetBaseCfg( + prim_path="/World/light", + spawn=sim_utils.DistantLightCfg(color=(0.75, 0.75, 0.75), intensity=3000.0), + ) + + +## +# MDP settings +## + + +@configclass +class ActionsCfg: + """Action specifications for the MDP.""" + + joint_pos = mdp.JointPositionActionCfg(asset_name="robot", joint_names=[".*"], scale=0.5, use_default_offset=True) + + +@configclass +class ObservationsCfg: + """Observation specifications for the MDP.""" + + @configclass + class PolicyCfg(ObsGroup): + """Observations for policy group.""" + + # observation terms (order preserved) + base_lin_vel = ObsTerm(func=mdp.base_lin_vel, noise=Unoise(n_min=-0.1, n_max=0.1)) + base_ang_vel = ObsTerm(func=mdp.base_ang_vel, noise=Unoise(n_min=-0.2, n_max=0.2)) + projected_gravity = ObsTerm( + func=mdp.projected_gravity, + noise=Unoise(n_min=-0.05, n_max=0.05), + ) + velocity_commands = ObsTerm(func=constant_commands) + joint_pos = ObsTerm(func=mdp.joint_pos_rel, noise=Unoise(n_min=-0.01, n_max=0.01)) + joint_vel = ObsTerm(func=mdp.joint_vel_rel, noise=Unoise(n_min=-1.5, n_max=1.5)) + actions = ObsTerm(func=mdp.last_action) + 
height_scan = ObsTerm( + func=mdp.height_scan, + params={"sensor_cfg": SceneEntityCfg("height_scanner")}, + noise=Unoise(n_min=-0.1, n_max=0.1), + clip=(-1.0, 1.0), + ) + + def __post_init__(self): + self.enable_corruption = True + self.concatenate_terms = True + + # observation groups + policy: PolicyCfg = PolicyCfg() + + +@configclass +class EventCfg: + """Configuration for events.""" + + reset_scene = EventTerm(func=mdp.reset_scene_to_default, mode="reset") + + +## +# Environment configuration +## + + +@configclass +class QuadrupedEnvCfg(ManagerBasedEnvCfg): + """Configuration for the locomotion velocity-tracking environment.""" + + # Scene settings + scene: MySceneCfg = MySceneCfg(num_envs=args_cli.num_envs, env_spacing=2.5) + # Basic settings + observations: ObservationsCfg = ObservationsCfg() + actions: ActionsCfg = ActionsCfg() + events: EventCfg = EventCfg() + + def __post_init__(self): + """Post initialization.""" + # general settings + self.decimation = 4 # env decimation -> 50 Hz control + # simulation settings + self.sim.dt = 0.005 # simulation timestep -> 200 Hz physics + self.sim.physics_material = self.scene.terrain.physics_material + self.sim.device = args_cli.device + # update sensor update periods + # we tick all the sensors based on the smallest update period (physics update period) + if self.scene.height_scanner is not None: + self.scene.height_scanner.update_period = self.decimation * self.sim.dt # 50 Hz + + +def main(): + """Main function.""" + # setup base environment + env_cfg = QuadrupedEnvCfg() + env = ManagerBasedEnv(cfg=env_cfg) + + # load level policy + policy_path = ISAACLAB_NUCLEUS_DIR + "/Policies/ANYmal-C/HeightScan/policy.pt" + # check if policy file exists + if not check_file_path(policy_path): + raise FileNotFoundError(f"Policy file '{policy_path}' does not exist.") + file_bytes = read_file(policy_path) + # jit load the policy + policy = torch.jit.load(file_bytes).to(env.device).eval() + + # simulate physics + count = 0 + obs, _ 
= env.reset() + while simulation_app.is_running(): + with torch.inference_mode(): + # reset + if count % 1000 == 0: + obs, _ = env.reset() + count = 0 + print("-" * 80) + print("[INFO]: Resetting environment...") + # infer action + action = policy(obs["policy"]) + # step env + obs, _ = env.step(action) + # update counter + count += 1 + + # close the environment + env.close() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/03_envs/policy_inference_in_usd.py b/scripts/tutorials/03_envs/policy_inference_in_usd.py new file mode 100644 index 00000000..cab84e62 --- /dev/null +++ b/scripts/tutorials/03_envs/policy_inference_in_usd.py @@ -0,0 +1,87 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates policy inference in a prebuilt USD environment. + +In this example, we use a locomotion policy to control the H1 robot. The robot was trained +using Isaac-Velocity-Rough-H1-v0. The robot is commanded to move forward at a constant velocity. + +.. 
code-block:: bash + + # Run the script + ./isaaclab.sh -p scripts/tutorials/03_envs/policy_inference_in_usd.py --checkpoint /path/to/jit/checkpoint.pt + +""" + +"""Launch Isaac Sim Simulator first.""" + + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on inferencing a policy on an H1 robot in a warehouse.") +parser.add_argument("--checkpoint", type=str, help="Path to model checkpoint exported as jit.", required=True) + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" +import io +import os +import torch + +import omni + +from isaaclab.envs import ManagerBasedRLEnv +from isaaclab.terrains import TerrainImporterCfg +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR + +from isaaclab_tasks.manager_based.locomotion.velocity.config.h1.rough_env_cfg import H1RoughEnvCfg_PLAY + + +def main(): + """Main function.""" + # load the trained jit policy + policy_path = os.path.abspath(args_cli.checkpoint) + file_content = omni.client.read_file(policy_path)[2] + file = io.BytesIO(memoryview(file_content).tobytes()) + policy = torch.jit.load(file, map_location=args_cli.device) + + # setup environment + env_cfg = H1RoughEnvCfg_PLAY() + env_cfg.scene.num_envs = 1 + env_cfg.curriculum = None + env_cfg.scene.terrain = TerrainImporterCfg( + prim_path="/World/ground", + terrain_type="usd", + usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Simple_Warehouse/warehouse.usd", + ) + env_cfg.sim.device = args_cli.device + if args_cli.device == "cpu": + env_cfg.sim.use_fabric = False + + # create environment + env = ManagerBasedRLEnv(cfg=env_cfg) + + # run inference with the policy + obs, _ = env.reset() + with torch.inference_mode(): + while simulation_app.is_running(): + action = 
policy(obs["policy"]) + obs, _, _, _, _ = env.step(action) + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts/tutorials/03_envs/run_cartpole_rl_env.py b/scripts/tutorials/03_envs/run_cartpole_rl_env.py new file mode 100644 index 00000000..87cee28a --- /dev/null +++ b/scripts/tutorials/03_envs/run_cartpole_rl_env.py @@ -0,0 +1,79 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to run the RL environment for the cartpole balancing task. + +.. code-block:: bash + + ./isaaclab.sh -p scripts/tutorials/03_envs/run_cartpole_rl_env.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on running the cartpole RL environment.") +parser.add_argument("--num_envs", type=int, default=16, help="Number of environments to spawn.") + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +from isaaclab.envs import ManagerBasedRLEnv + +from isaaclab_tasks.manager_based.classic.cartpole.cartpole_env_cfg import CartpoleEnvCfg + + +def main(): + """Main function.""" + # create environment configuration + env_cfg = CartpoleEnvCfg() + env_cfg.scene.num_envs = args_cli.num_envs + env_cfg.sim.device = args_cli.device + # setup RL environment + env = ManagerBasedRLEnv(cfg=env_cfg) + + # simulate physics + count = 0 + while simulation_app.is_running(): + with torch.inference_mode(): + # reset + if count % 300 == 0: + count = 0 + env.reset() + print("-" * 80) + print("[INFO]: Resetting environment...") + # 
sample random actions + joint_efforts = torch.randn_like(env.action_manager.action) + # step the environment + obs, rew, terminated, truncated, info = env.step(joint_efforts) + # print current orientation of pole + print("[Env 0]: Pole joint: ", obs["policy"][0][1].item()) + # update counter + count += 1 + + # close the environment + env.close() + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/04_sensors/add_sensors_on_robot.py b/scripts/tutorials/04_sensors/add_sensors_on_robot.py new file mode 100644 index 00000000..c877566b --- /dev/null +++ b/scripts/tutorials/04_sensors/add_sensors_on_robot.py @@ -0,0 +1,179 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to add and simulate on-board sensors for a robot. + +We add the following sensors on the quadruped robot, ANYmal-C (ANYbotics): + +* USD-Camera: This is a camera sensor that is attached to the robot's base. +* Height Scanner: This is a height scanner sensor that is attached to the robot's base. +* Contact Sensor: This is a contact sensor that is attached to the robot's feet. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/04_sensors/add_sensors_on_robot.py --enable_cameras + +""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on adding sensors on a robot.") +parser.add_argument("--num_envs", type=int, default=2, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.assets import ArticulationCfg, AssetBaseCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sensors import CameraCfg, ContactSensorCfg, RayCasterCfg, patterns +from isaaclab.utils import configclass + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort: skip + + +@configclass +class SensorsSceneCfg(InteractiveSceneCfg): + """Design the scene with sensors on the robot.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + + # lights + dome_light = AssetBaseCfg( + prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + ) + + # robot + robot: ArticulationCfg = ANYMAL_C_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # sensors + camera = CameraCfg( + prim_path="{ENV_REGEX_NS}/Robot/base/front_cam", + update_period=0.1, + height=480, + width=640, + data_types=["rgb", "distance_to_image_plane"], + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) + ), + offset=CameraCfg.OffsetCfg(pos=(0.510, 0.0, 0.015), rot=(0.5, -0.5, 0.5, -0.5), 
convention="ros"), + ) + height_scanner = RayCasterCfg( + prim_path="{ENV_REGEX_NS}/Robot/base", + update_period=0.02, + offset=RayCasterCfg.OffsetCfg(pos=(0.0, 0.0, 20.0)), + ray_alignment="yaw", + pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=[1.6, 1.0]), + debug_vis=True, + mesh_prim_paths=["/World/defaultGroundPlane"], + ) + contact_forces = ContactSensorCfg( + prim_path="{ENV_REGEX_NS}/Robot/.*_FOOT", update_period=0.0, history_length=6, debug_vis=True + ) + + +def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # Simulate physics + while simulation_app.is_running(): + # Reset + if count % 500 == 0: + # reset counter + count = 0 + # reset the scene entities + # root state + # we offset the root state by the origin since the states are written in simulation world frame + # if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world + root_state = scene["robot"].data.default_root_state.clone() + root_state[:, :3] += scene.env_origins + scene["robot"].write_root_pose_to_sim(root_state[:, :7]) + scene["robot"].write_root_velocity_to_sim(root_state[:, 7:]) + # set joint positions with some noise + joint_pos, joint_vel = ( + scene["robot"].data.default_joint_pos.clone(), + scene["robot"].data.default_joint_vel.clone(), + ) + joint_pos += torch.rand_like(joint_pos) * 0.1 + scene["robot"].write_joint_state_to_sim(joint_pos, joint_vel) + # clear internal buffers + scene.reset() + print("[INFO]: Resetting robot state...") + # Apply default actions to the robot + # -- generate actions/commands + targets = scene["robot"].data.default_joint_pos + # -- apply action to the robot + scene["robot"].set_joint_position_target(targets) + # -- write data to sim + scene.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # update buffers + 
scene.update(sim_dt) + + # print information from the sensors + print("-------------------------------") + print(scene["camera"]) + print("Received shape of rgb image: ", scene["camera"].data.output["rgb"].shape) + print("Received shape of depth image: ", scene["camera"].data.output["distance_to_image_plane"].shape) + print("-------------------------------") + print(scene["height_scanner"]) + print("Received max height value: ", torch.max(scene["height_scanner"].data.ray_hits_w[..., -1]).item()) + print("-------------------------------") + print(scene["contact_forces"]) + print("Received max contact force of: ", torch.max(scene["contact_forces"].data.net_forces_w).item()) + + +def main(): + """Main function.""" + + # Initialize the simulation context + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[3.5, 3.5, 3.5], target=[0.0, 0.0, 0.0]) + # Design scene + scene_cfg = SensorsSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/04_sensors/run_frame_transformer.py b/scripts/tutorials/04_sensors/run_frame_transformer.py new file mode 100644 index 00000000..41890e9d --- /dev/null +++ b/scripts/tutorials/04_sensors/run_frame_transformer.py @@ -0,0 +1,185 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates the FrameTransformer sensor by visualizing the frames that it creates. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/04_sensors/run_frame_transformer.py + +""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser( + description="This script checks the FrameTransformer sensor by visualizing the frames that it creates." +) +AppLauncher.add_app_launcher_args(parser) +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(headless=args_cli.headless) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import math +import torch + +import isaacsim.util.debug_draw._debug_draw as omni_debug_draw + +import isaaclab.sim as sim_utils +import isaaclab.utils.math as math_utils +from isaaclab.assets import Articulation +from isaaclab.markers import VisualizationMarkers +from isaaclab.markers.config import FRAME_MARKER_CFG +from isaaclab.sensors import FrameTransformer, FrameTransformerCfg, OffsetCfg +from isaaclab.sim import SimulationContext + +## +# Pre-defined configs +## +from isaaclab_assets.robots.anymal import ANYMAL_C_CFG # isort:skip + + +def define_sensor() -> FrameTransformer: + """Defines the FrameTransformer sensor to add to the scene.""" + # define offset + rot_offset = math_utils.quat_from_euler_xyz(torch.zeros(1), torch.zeros(1), torch.tensor(-math.pi / 2)) + pos_offset = math_utils.quat_apply(rot_offset, torch.tensor([0.08795, 0.01305, -0.33797])) + + # Example using .* to get full body + LF_FOOT + frame_transformer_cfg = FrameTransformerCfg( + prim_path="/World/Robot/base", + target_frames=[ + FrameTransformerCfg.FrameCfg(prim_path="/World/Robot/.*"), + FrameTransformerCfg.FrameCfg( + prim_path="/World/Robot/LF_SHANK", + name="LF_FOOT_USER", + offset=OffsetCfg(pos=tuple(pos_offset.tolist()), rot=tuple(rot_offset[0].tolist())), + ), + ], + debug_vis=False, + ) + frame_transformer = FrameTransformer(frame_transformer_cfg) + + return frame_transformer + + 
+def design_scene() -> dict: + """Design the scene.""" + # Populate scene + # -- Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # -- Lights + cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + cfg.func("/World/Light", cfg) + # -- Robot + robot = Articulation(ANYMAL_C_CFG.replace(prim_path="/World/Robot")) + # -- Sensors + frame_transformer = define_sensor() + + # return the scene information + scene_entities = {"robot": robot, "frame_transformer": frame_transformer} + return scene_entities + + +def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): + """Run the simulator.""" + # Define simulation stepping + sim_dt = sim.get_physics_dt() + sim_time = 0.0 + count = 0 + + # extract entities for simplified notation + robot: Articulation = scene_entities["robot"] + frame_transformer: FrameTransformer = scene_entities["frame_transformer"] + + # We only want one visualization at a time. This visualizer will be used + # to step through each frame so the user can verify that the correct frame + # is being visualized as the frame names are printing to console + if not args_cli.headless: + cfg = FRAME_MARKER_CFG.replace(prim_path="/Visuals/FrameVisualizerFromScript") + cfg.markers["frame"].scale = (0.1, 0.1, 0.1) + transform_visualizer = VisualizationMarkers(cfg) + # debug drawing for lines connecting the frame + draw_interface = omni_debug_draw.acquire_debug_draw_interface() + else: + transform_visualizer = None + draw_interface = None + + frame_index = 0 + # Simulate physics + while simulation_app.is_running(): + # perform this loop at policy control freq (50 Hz) + robot.set_joint_position_target(robot.data.default_joint_pos.clone()) + robot.write_data_to_sim() + # perform step + sim.step() + # update sim-time + sim_time += sim_dt + count += 1 + # read data from sim + robot.update(sim_dt) + frame_transformer.update(dt=sim_dt) + + # Change the frame that we are visualizing to 
ensure that frame names + # are correctly associated with the frames + if not args_cli.headless: + if count % 50 == 0: + # get frame names + frame_names = frame_transformer.data.target_frame_names + # increment frame index + frame_index += 1 + frame_index = frame_index % len(frame_names) + print(f"Displaying Frame ID {frame_index}: {frame_names[frame_index]}") + + # visualize frame + source_pos = frame_transformer.data.source_pos_w + source_quat = frame_transformer.data.source_quat_w + target_pos = frame_transformer.data.target_pos_w[:, frame_index] + target_quat = frame_transformer.data.target_quat_w[:, frame_index] + # draw the frames + transform_visualizer.visualize( + torch.cat([source_pos, target_pos], dim=0), torch.cat([source_quat, target_quat], dim=0) + ) + # draw the line connecting the frames + draw_interface.clear_lines() + # plain color for lines + lines_colors = [[1.0, 1.0, 0.0, 1.0]] * source_pos.shape[0] + line_thicknesses = [5.0] * source_pos.shape[0] + draw_interface.draw_lines(source_pos.tolist(), target_pos.tolist(), lines_colors, line_thicknesses) + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(dt=0.005, device=args_cli.device) + sim = SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view(eye=[2.5, 2.5, 2.5], target=[0.0, 0.0, 0.0]) + # Design scene + scene_entities = design_scene() + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene_entities) + + +if __name__ == "__main__": + # Run the main function + main() + # Close the simulator + simulation_app.close() diff --git a/scripts/tutorials/04_sensors/run_ray_caster.py b/scripts/tutorials/04_sensors/run_ray_caster.py new file mode 100644 index 00000000..9916fa97 --- /dev/null +++ b/scripts/tutorials/04_sensors/run_ray_caster.py @@ -0,0 +1,151 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. 
def define_sensor() -> RayCaster:
    """Defines the ray-caster sensor to add to the scene.

    The sensor is attached to every ball prim (``/World/Origin.*/ball``) and
    casts a 2x2 m grid of rays (0.1 m resolution) against the ground mesh.
    """
    # Create a ray-caster sensor
    ray_caster_cfg = RayCasterCfg(
        prim_path="/World/Origin.*/ball",
        mesh_prim_paths=["/World/ground"],
        pattern_cfg=patterns.GridPatternCfg(resolution=0.1, size=(2.0, 2.0)),
        ray_alignment="yaw",
        # visualize ray hits only when a GUI is available
        debug_vis=not args_cli.headless,
    )
    return RayCaster(cfg=ray_caster_cfg)


def design_scene() -> dict:
    """Designs the scene: rough terrain, a light, four balls and the ray-caster sensor.

    Returns:
        A dictionary with the spawned balls (``"balls"``) and the sensor (``"ray_caster"``).
    """
    # -- Rough terrain
    cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    cfg.func("/World/ground", cfg)
    # -- Light
    cfg = sim_utils.DistantLightCfg(intensity=2000)
    cfg.func("/World/light", cfg)

    # Create separate origin groups ("Origin0".."Origin3"); one ball spawns under each.
    origins = [[0.25, 0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]]
    for i, origin in enumerate(origins):
        prim_utils.create_prim(f"/World/Origin{i}", "Xform", translation=origin)
    # -- Balls
    cfg = RigidObjectCfg(
        prim_path="/World/Origin.*/ball",
        spawn=sim_utils.SphereCfg(
            radius=0.25,
            rigid_props=sim_utils.RigidBodyPropertiesCfg(),
            mass_props=sim_utils.MassPropertiesCfg(mass=0.5),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 1.0)),
        ),
    )
    balls = RigidObject(cfg)
    # -- Sensors
    ray_caster = define_sensor()

    # return the scene information
    return {"balls": balls, "ray_caster": ray_caster}


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Runs the simulation loop: periodically scatters the balls and updates the sensor.

    Args:
        sim: Simulation context.
        scene_entities: Dictionary returned by :func:`design_scene`.
    """
    # Extract scene entities for simplified notation
    ray_caster: RayCaster = scene_entities["ray_caster"]
    balls: RigidObject = scene_entities["balls"]

    # Sample a random initial position for each ball (uniform in [0, 10) per axis)
    ball_default_state = balls.data.default_root_state.clone()
    ball_default_state[:, :3] = torch.rand_like(ball_default_state[:, :3]) * 10
    # Number of sensor instances (one per ball). Fix: previously hard-coded as 4
    # in the Timer message below; derive it from the state tensor instead.
    num_sensors = ball_default_state.shape[0]

    # Counter used to trigger periodic resets
    step_count = 0
    # Simulate physics
    while simulation_app.is_running():
        # Reset the scene every 250 steps
        if step_count % 250 == 0:
            # reset the balls
            balls.write_root_pose_to_sim(ball_default_state[:, :7])
            balls.write_root_velocity_to_sim(ball_default_state[:, 7:])
            # reset the sensor
            ray_caster.reset()
            # reset the counter
            step_count = 0
        # Step simulation
        sim.step()
        # Update the ray-caster and time the update call
        with Timer(
            f"Ray-caster update with {num_sensors} x {ray_caster.num_rays} rays with max height of"
            f" {torch.max(ray_caster.data.pos_w).item():.2f}"
        ):
            ray_caster.update(dt=sim.get_physics_dt(), force_recompute=True)
        # Update counter
        step_count += 1


def main():
    """Main function."""
    # Load simulation context
    sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([0.0, 15.0, 15.0], [0.0, 0.0, -2.5])
    # Design scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
def define_sensor() -> RayCasterCamera:
    """Defines the ray-cast camera sensor to add to the scene."""
    # Camera base frames.
    # In contrast to the USD camera, the sensor is associated with prims that
    # already exist at these locations, i.e. each prim becomes the sensor's parent.
    for origin_name in ("Origin_00", "Origin_01"):
        prim_utils.create_prim(f"/World/{origin_name}/CameraSensor", "Xform")

    # Configure the camera sensor: pinhole pattern ray-cast against the ground mesh
    camera_cfg = RayCasterCameraCfg(
        prim_path="/World/Origin_.*/CameraSensor",
        mesh_prim_paths=["/World/ground"],
        update_period=0.1,
        offset=RayCasterCameraCfg.OffsetCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)),
        data_types=["distance_to_image_plane", "normals", "distance_to_camera"],
        debug_vis=True,
        pattern_cfg=patterns.PinholeCameraPatternCfg(
            focal_length=24.0,
            horizontal_aperture=20.955,
            height=480,
            width=640,
        ),
    )
    return RayCasterCamera(cfg=camera_cfg)


def design_scene():
    """Populates the scene with a rough terrain, a distant light and the camera sensor."""
    # -- Rough terrain
    terrain_cfg = sim_utils.UsdFileCfg(usd_path=f"{ISAAC_NUCLEUS_DIR}/Environments/Terrains/rough_plane.usd")
    terrain_cfg.func("/World/ground", terrain_cfg)
    # -- Lights
    light_cfg = sim_utils.DistantLightCfg(intensity=600.0, color=(0.75, 0.75, 0.75))
    light_cfg.func("/World/Light", light_cfg)
    # -- Sensors
    return {"camera": define_sensor()}


def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict):
    """Runs the simulation loop: steps physics, updates the camera, optionally saves data."""
    # extract entities for simplified notation
    camera: RayCasterCamera = scene_entities["camera"]

    # Writer used to dump camera data under <script-dir>/output/ray_caster_camera
    output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "ray_caster_camera")
    rep_writer = rep.BasicWriter(output_dir=output_dir, frame_padding=3)

    # Set pose: there are two ways to set the pose of the camera.
    # -- Option-1: Set pose using view
    eyes = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device)
    targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device)
    camera.set_world_poses_from_view(eyes, targets)
    # -- Option-2: Set pose using ROS
    # position = torch.tensor([[2.5, 2.5, 2.5]], device=sim.device)
    # orientation = torch.tensor([[-0.17591989, 0.33985114, 0.82047325, -0.42470819]], device=sim.device)
    # camera.set_world_poses(position, orientation, indices=[0], convention="ros")

    # Simulate physics
    while simulation_app.is_running():
        sim.step()
        camera.update(dt=sim.get_physics_dt())

        # Print camera info
        print(camera)
        print("Received shape of depth image: ", camera.data.output["distance_to_image_plane"].shape)
        print("-------------------------------")

        if args_cli.save:
            # Save data from the first camera.
            # note: BasicWriter only supports numpy, so convert the tensors first.
            camera_index = 0
            single_cam_data = convert_dict_to_backend(
                {k: v[camera_index] for k, v in camera.data.output.items()}, backend="numpy"
            )
            single_cam_info = camera.data.info[camera_index]

            # Pack data back into replicator format to save it with the writer
            rep_output = {"annotators": {}}
            for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()):
                if info is not None:
                    rep_output["annotators"][key] = {"render_product": {"data": data, **info}}
                else:
                    rep_output["annotators"][key] = {"render_product": {"data": data}}
            # Replicator needs an on-time trigger entry to save the images
            rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]}
            rep_writer.write(rep_output)

            # Depth -> pointcloud (camera frame) -> reprojected depth must round-trip
            points_3d_cam = unproject_depth(
                camera.data.output["distance_to_image_plane"], camera.data.intrinsic_matrices
            )
            im_height, im_width = camera.image_shape
            reproj_points = project_points(points_3d_cam, camera.data.intrinsic_matrices)
            reproj_depths = reproj_points[..., -1].view(-1, im_width, im_height).transpose_(1, 2)
            sim_depths = camera.data.output["distance_to_image_plane"].squeeze(-1)
            torch.testing.assert_close(reproj_depths, sim_depths)


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg()
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 3.5], [0.0, 0.0, 0.0])
    # Design scene
    scene_entities = design_scene()
    # Play simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run simulator
    run_simulator(sim=sim, scene_entities=scene_entities)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
+ ), +) +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import numpy as np +import os +import random +import torch + +import isaacsim.core.utils.prims as prim_utils +import omni.replicator.core as rep + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObject, RigidObjectCfg +from isaaclab.markers import VisualizationMarkers +from isaaclab.markers.config import RAY_CASTER_MARKER_CFG +from isaaclab.sensors.camera import Camera, CameraCfg +from isaaclab.sensors.camera.utils import create_pointcloud_from_depth +from isaaclab.utils import convert_dict_to_backend + + +def define_sensor() -> Camera: + """Defines the camera sensor to add to the scene.""" + # Setup camera sensor + # In contrast to the ray-cast camera, we spawn the prim at these locations. + # This means the camera sensor will be attached to these prims. 
+ prim_utils.create_prim("/World/Origin_00", "Xform") + prim_utils.create_prim("/World/Origin_01", "Xform") + camera_cfg = CameraCfg( + prim_path="/World/Origin_.*/CameraSensor", + update_period=0, + height=480, + width=640, + data_types=[ + "rgb", + "distance_to_image_plane", + "normals", + "semantic_segmentation", + "instance_segmentation_fast", + "instance_id_segmentation_fast", + ], + colorize_semantic_segmentation=True, + colorize_instance_id_segmentation=True, + colorize_instance_segmentation=True, + spawn=sim_utils.PinholeCameraCfg( + focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 1.0e5) + ), + ) + # Create camera + camera = Camera(cfg=camera_cfg) + + return camera + + +def design_scene() -> dict: + """Design the scene.""" + # Populate scene + # -- Ground-plane + cfg = sim_utils.GroundPlaneCfg() + cfg.func("/World/defaultGroundPlane", cfg) + # -- Lights + cfg = sim_utils.DistantLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)) + cfg.func("/World/Light", cfg) + + # Create a dictionary for the scene entities + scene_entities = {} + + # Xform to hold objects + prim_utils.create_prim("/World/Objects", "Xform") + # Random objects + for i in range(8): + # sample random position + position = np.random.rand(3) - np.asarray([0.05, 0.05, -1.0]) + position *= np.asarray([1.5, 1.5, 0.5]) + # sample random color + color = (random.random(), random.random(), random.random()) + # choose random prim type + prim_type = random.choice(["Cube", "Cone", "Cylinder"]) + common_properties = { + "rigid_props": sim_utils.RigidBodyPropertiesCfg(), + "mass_props": sim_utils.MassPropertiesCfg(mass=5.0), + "collision_props": sim_utils.CollisionPropertiesCfg(), + "visual_material": sim_utils.PreviewSurfaceCfg(diffuse_color=color, metallic=0.5), + "semantic_tags": [("class", prim_type)], + } + if prim_type == "Cube": + shape_cfg = sim_utils.CuboidCfg(size=(0.25, 0.25, 0.25), **common_properties) + elif prim_type == "Cone": + shape_cfg = 
sim_utils.ConeCfg(radius=0.1, height=0.25, **common_properties) + elif prim_type == "Cylinder": + shape_cfg = sim_utils.CylinderCfg(radius=0.25, height=0.25, **common_properties) + # Rigid Object + obj_cfg = RigidObjectCfg( + prim_path=f"/World/Objects/Obj_{i:02d}", + spawn=shape_cfg, + init_state=RigidObjectCfg.InitialStateCfg(pos=position), + ) + scene_entities[f"rigid_object{i}"] = RigidObject(cfg=obj_cfg) + + # Sensors + camera = define_sensor() + + # return the scene information + scene_entities["camera"] = camera + return scene_entities + + +def run_simulator(sim: sim_utils.SimulationContext, scene_entities: dict): + """Run the simulator.""" + # extract entities for simplified notation + camera: Camera = scene_entities["camera"] + + # Create replicator writer + output_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "output", "camera") + rep_writer = rep.BasicWriter( + output_dir=output_dir, + frame_padding=0, + colorize_instance_id_segmentation=camera.cfg.colorize_instance_id_segmentation, + colorize_instance_segmentation=camera.cfg.colorize_instance_segmentation, + colorize_semantic_segmentation=camera.cfg.colorize_semantic_segmentation, + ) + + # Camera positions, targets, orientations + camera_positions = torch.tensor([[2.5, 2.5, 2.5], [-2.5, -2.5, 2.5]], device=sim.device) + camera_targets = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], device=sim.device) + # These orientations are in ROS-convention, and will position the cameras to view the origin + camera_orientations = torch.tensor( # noqa: F841 + [[-0.1759, 0.3399, 0.8205, -0.4247], [-0.4247, 0.8205, -0.3399, 0.1759]], device=sim.device + ) + + # Set pose: There are two ways to set the pose of the camera. 
+ # -- Option-1: Set pose using view + camera.set_world_poses_from_view(camera_positions, camera_targets) + # -- Option-2: Set pose using ROS + # camera.set_world_poses(camera_positions, camera_orientations, convention="ros") + + # Index of the camera to use for visualization and saving + camera_index = args_cli.camera_id + + # Create the markers for the --draw option outside of is_running() loop + if sim.has_gui() and args_cli.draw: + cfg = RAY_CASTER_MARKER_CFG.replace(prim_path="/Visuals/CameraPointCloud") + cfg.markers["hit"].radius = 0.002 + pc_markers = VisualizationMarkers(cfg) + + # Simulate physics + while simulation_app.is_running(): + # Step simulation + sim.step() + # Update camera data + camera.update(dt=sim.get_physics_dt()) + + # Print camera info + print(camera) + if "rgb" in camera.data.output.keys(): + print("Received shape of rgb image : ", camera.data.output["rgb"].shape) + if "distance_to_image_plane" in camera.data.output.keys(): + print("Received shape of depth image : ", camera.data.output["distance_to_image_plane"].shape) + if "normals" in camera.data.output.keys(): + print("Received shape of normals : ", camera.data.output["normals"].shape) + if "semantic_segmentation" in camera.data.output.keys(): + print("Received shape of semantic segm. : ", camera.data.output["semantic_segmentation"].shape) + if "instance_segmentation_fast" in camera.data.output.keys(): + print("Received shape of instance segm. : ", camera.data.output["instance_segmentation_fast"].shape) + if "instance_id_segmentation_fast" in camera.data.output.keys(): + print("Received shape of instance id segm.: ", camera.data.output["instance_id_segmentation_fast"].shape) + print("-------------------------------") + + # Extract camera data + if args_cli.save: + # Save images from camera at camera_index + # note: BasicWriter only supports saving data in numpy format, so we need to convert the data to numpy. 
+ single_cam_data = convert_dict_to_backend( + {k: v[camera_index] for k, v in camera.data.output.items()}, backend="numpy" + ) + + # Extract the other information + single_cam_info = camera.data.info[camera_index] + + # Pack data back into replicator format to save them using its writer + rep_output = {"annotators": {}} + for key, data, info in zip(single_cam_data.keys(), single_cam_data.values(), single_cam_info.values()): + if info is not None: + rep_output["annotators"][key] = {"render_product": {"data": data, **info}} + else: + rep_output["annotators"][key] = {"render_product": {"data": data}} + # Save images + # Note: We need to provide On-time data for Replicator to save the images. + rep_output["trigger_outputs"] = {"on_time": camera.frame[camera_index]} + rep_writer.write(rep_output) + + # Draw pointcloud if there is a GUI and --draw has been passed + if sim.has_gui() and args_cli.draw and "distance_to_image_plane" in camera.data.output.keys(): + # Derive pointcloud from camera at camera_index + pointcloud = create_pointcloud_from_depth( + intrinsic_matrix=camera.data.intrinsic_matrices[camera_index], + depth=camera.data.output["distance_to_image_plane"][camera_index], + position=camera.data.pos_w[camera_index], + orientation=camera.data.quat_w_ros[camera_index], + device=sim.device, + ) + + # In the first few steps, things are still being instanced and Camera.data + # can be empty. If we attempt to visualize an empty pointcloud it will crash + # the sim, so we check that the pointcloud is not empty. + if pointcloud.size()[0] > 0: + pc_markers.visualize(translations=pointcloud) + + +def main(): + """Main function.""" + # Load simulation context + sim_cfg = sim_utils.SimulationCfg(device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) + # Design scene + scene_entities = design_scene() + # Play simulator + sim.reset() + # Now we are ready! 
+ print("[INFO]: Setup complete...") + # Run simulator + run_simulator(sim, scene_entities) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close() diff --git a/scripts/tutorials/05_controllers/run_diff_ik.py b/scripts/tutorials/05_controllers/run_diff_ik.py new file mode 100644 index 00000000..180b4c48 --- /dev/null +++ b/scripts/tutorials/05_controllers/run_diff_ik.py @@ -0,0 +1,212 @@ +# Copyright (c) 2024-2025, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +This script demonstrates how to use the differential inverse kinematics controller with the simulator. + +The differential IK controller can be configured in different modes. It uses the Jacobians computed by +PhysX. This helps perform parallelized computation of the inverse kinematics. + +.. code-block:: bash + + # Usage + ./isaaclab.sh -p scripts/tutorials/05_controllers/run_diff_ik.py + +""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the differential IK controller.") +parser.add_argument("--robot", type=str, default="franka_panda", help="Name of the robot.") +parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to spawn.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg +from isaaclab.controllers import DifferentialIKController, DifferentialIKControllerCfg +from isaaclab.managers import SceneEntityCfg +from isaaclab.markers import 
@configclass
class TableTopSceneCfg(InteractiveSceneCfg):
    """Configuration for a table-top scene with a fixed-base robot arm.

    Fix: the docstring previously said "cart-pole scene" (copy-paste error);
    the scene contains a ground plane, a dome light, a table stand and the
    robot selected via the ``--robot`` CLI argument.
    """

    # ground plane
    ground = AssetBaseCfg(
        prim_path="/World/defaultGroundPlane",
        spawn=sim_utils.GroundPlaneCfg(),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)),
    )

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # mount
    table = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Table",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
        ),
    )

    # articulation: selected via the --robot CLI argument
    if args_cli.robot == "franka_panda":
        robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    elif args_cli.robot == "ur10":
        robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop.

    Cycles the end-effector through a fixed set of goal poses, solving for joint
    position targets with a differential-IK (damped least-squares) controller.

    Args:
        sim: Simulation context.
        scene: Interactive scene holding the robot.
    """
    # Extract scene entities
    # note: we only do this here for readability.
    robot = scene["robot"]

    # Create controller
    diff_ik_cfg = DifferentialIKControllerCfg(command_type="pose", use_relative_mode=False, ik_method="dls")
    diff_ik_controller = DifferentialIKController(diff_ik_cfg, num_envs=scene.num_envs, device=sim.device)

    # Markers for visualizing current and goal end-effector poses
    frame_marker_cfg = FRAME_MARKER_CFG.copy()
    frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
    ee_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_current"))
    goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal"))

    # Goal poses (position + quaternion) for the arm, cycled through during the run
    ee_goals = [
        [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
        [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
        [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
    ]
    ee_goals = torch.tensor(ee_goals, device=sim.device)
    # Track the given command
    current_goal_idx = 0
    # Buffer holding the commanded pose for every environment
    ik_commands = torch.zeros(scene.num_envs, diff_ik_controller.action_dim, device=robot.device)
    ik_commands[:] = ee_goals[current_goal_idx]

    # Specify robot-specific joint/body names
    if args_cli.robot == "franka_panda":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=["panda_joint.*"], body_names=["panda_hand"])
    elif args_cli.robot == "ur10":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=[".*"], body_names=["ee_link"])
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")
    # Resolving the scene entities
    robot_entity_cfg.resolve(scene)
    # Obtain the frame index of the end-effector.
    # For a fixed-base robot, the frame index is one less than the body index because
    # the root body is not included in the Jacobians returned by PhysX.
    if robot.is_fixed_base:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0] - 1
    else:
        ee_jacobi_idx = robot_entity_cfg.body_ids[0]

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # periodic reset: restore joint state, re-issue command, then switch to the next goal
        if count % 150 == 0:
            count = 0
            joint_pos = robot.data.default_joint_pos.clone()
            joint_vel = robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(joint_pos, joint_vel)
            robot.reset()
            ik_commands[:] = ee_goals[current_goal_idx]
            joint_pos_des = joint_pos[:, robot_entity_cfg.joint_ids].clone()
            diff_ik_controller.reset()
            diff_ik_controller.set_command(ik_commands)
            current_goal_idx = (current_goal_idx + 1) % len(ee_goals)
        else:
            # obtain quantities from simulation
            jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids]
            ee_pose_w = robot.data.body_pose_w[:, robot_entity_cfg.body_ids[0]]
            root_pose_w = robot.data.root_pose_w
            joint_pos = robot.data.joint_pos[:, robot_entity_cfg.joint_ids]
            # express the end-effector pose in the robot root frame
            ee_pos_b, ee_quat_b = subtract_frame_transforms(
                root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
            )
            # compute the joint commands
            joint_pos_des = diff_ik_controller.compute(ee_pos_b, ee_quat_b, jacobian, joint_pos)

        # apply actions
        robot.set_joint_position_target(joint_pos_des, joint_ids=robot_entity_cfg.joint_ids)
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        count += 1
        # update buffers
        scene.update(sim_dt)

        # visualize current and goal end-effector poses
        ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
        ee_marker.visualize(ee_pose_w[:, 0:3], ee_pose_w[:, 3:7])
        goal_marker.visualize(ik_commands[:, 0:3] + scene.env_origins, ik_commands[:, 3:7])


def main():
    """Main function."""
    # Load kit helper
    sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device)
    sim = sim_utils.SimulationContext(sim_cfg)
    # Set main camera
    sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # Design scene
    scene_cfg = TableTopSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0)
    scene = InteractiveScene(scene_cfg)
    # Play the simulator
    sim.reset()
    # Now we are ready!
    print("[INFO]: Setup complete...")
    # Run the simulator
    run_simulator(sim, scene)


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
@configclass
class SceneCfg(InteractiveSceneCfg):
    """Configuration for a simple scene with a tilted wall."""

    # ground plane
    ground = AssetBaseCfg(
        prim_path="/World/defaultGroundPlane",
        spawn=sim_utils.GroundPlaneCfg(),
    )

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # Thin, kinematic tilted wall (semi-transparent, with contact reporting enabled)
    tilted_wall = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/TiltedWall",
        spawn=sim_utils.CuboidCfg(
            size=(2.0, 1.5, 0.01),
            collision_props=sim_utils.CollisionPropertiesCfg(),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 0.0, 0.0), opacity=0.1),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
            activate_contact_sensors=True,
        ),
        init_state=AssetBaseCfg.InitialStateCfg(
            pos=(0.6 + 0.085, 0.0, 0.3), rot=(0.9238795325, 0.0, -0.3826834324, 0.0)
        ),
    )

    # Contact sensor attached to the wall
    contact_forces = ContactSensorCfg(
        prim_path="/World/envs/env_.*/TiltedWall",
        update_period=0.0,
        history_length=2,
        debug_vis=False,
    )

    # Franka arm: zero the PD gains on the arm actuators and disable gravity on the
    # spawned bodies, so the torques applied by the OSC drive the arm directly.
    robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    robot.actuators["panda_shoulder"].stiffness = 0.0
    robot.actuators["panda_shoulder"].damping = 0.0
    robot.actuators["panda_forearm"].stiffness = 0.0
    robot.actuators["panda_forearm"].damping = 0.0
    robot.spawn.rigid_props.disable_gravity = True


def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop.

    Args:
        sim: (SimulationContext) Simulation context.
        scene: (InteractiveScene) Interactive scene.
    """
    # Scene entities, extracted for readability
    robot = scene["robot"]
    contact_forces = scene["contact_forces"]

    # Indices for the end-effector body and the arm joints
    ee_frame_name = "panda_leftfinger"
    arm_joint_names = ["panda_joint.*"]
    ee_frame_idx = robot.find_bodies(ee_frame_name)[0][0]
    arm_joint_ids = robot.find_joints(arm_joint_names)[0]

    # Operational-space controller: absolute pose + wrench targets with variable kp
    osc_cfg = OperationalSpaceControllerCfg(
        target_types=["pose_abs", "wrench_abs"],
        impedance_mode="variable_kp",
        inertial_dynamics_decoupling=True,
        partial_inertial_dynamics_decoupling=False,
        gravity_compensation=False,
        motion_damping_ratio_task=1.0,
        contact_wrench_stiffness_task=[0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
        motion_control_axes_task=[1, 1, 0, 1, 1, 1],
        contact_wrench_control_axes_task=[0, 0, 1, 0, 0, 0],
        nullspace_control="position",
    )
    osc = OperationalSpaceController(osc_cfg, num_envs=scene.num_envs, device=sim.device)

    # Markers for the current and goal end-effector poses
    frame_marker_cfg = FRAME_MARKER_CFG.copy()
    frame_marker_cfg.markers["frame"].scale = (0.1, 0.1, 0.1)
    ee_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_current"))
    goal_marker = VisualizationMarkers(frame_marker_cfg.replace(prim_path="/Visuals/ee_goal"))

    # Target sets for the arm: poses (body frame), wrenches (task frame), stiffness gains
    ee_goal_pose_set_tilted_b = torch.tensor(
        [
            [0.6, 0.15, 0.3, 0.0, 0.92387953, 0.0, 0.38268343],
            [0.6, -0.3, 0.3, 0.0, 0.92387953, 0.0, 0.38268343],
            [0.8, 0.0, 0.5, 0.0, 0.92387953, 0.0, 0.38268343],
        ],
        device=sim.device,
    )
    ee_goal_wrench_set_tilted_task = torch.tensor(
        [
            [0.0, 0.0, 10.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 10.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 10.0, 0.0, 0.0, 0.0],
        ],
        device=sim.device,
    )
    kp_set_task = torch.tensor(
        [
            [360.0, 360.0, 360.0, 360.0, 360.0, 360.0],
            [420.0, 420.0, 420.0, 420.0, 420.0, 420.0],
            [320.0, 320.0, 320.0, 320.0, 320.0, 320.0],
        ],
        device=sim.device,
    )
    ee_target_set = torch.cat([ee_goal_pose_set_tilted_b, ee_goal_wrench_set_tilted_task, kp_set_task], dim=-1)

    # Define simulation stepping
    sim_dt = sim.get_physics_dt()

    # Buffers must be updated before the controller reads them for the first time
    robot.update(dt=sim_dt)

    # Center of the robot soft joint limits, used as the nullspace posture target
    joint_centers = torch.mean(robot.data.soft_joint_pos_limits[:, arm_joint_ids, :], dim=-1)

    # get the updated states
    (
        jacobian_b,
        mass_matrix,
        gravity,
        ee_pose_b,
        ee_vel_b,
        root_pose_w,
        ee_pose_w,
        ee_force_b,
        joint_pos,
        joint_vel,
    ) = update_states(sim, scene, robot, ee_frame_idx, arm_joint_ids, contact_forces)

    # Current goal index and command buffers
    current_goal_idx = 0
    # Generic target command, which can be pose, position, force, etc.
    command = torch.zeros(scene.num_envs, osc.action_dim, device=sim.device)
    ee_target_pose_b = torch.zeros(scene.num_envs, 7, device=sim.device)  # target pose, body frame
    ee_target_pose_w = torch.zeros(scene.num_envs, 7, device=sim.device)  # target pose, world frame (marker)

    # Effort buffers
    zero_joint_efforts = torch.zeros(scene.num_envs, robot.num_joints, device=sim.device)
    joint_efforts = torch.zeros(scene.num_envs, len(arm_joint_ids), device=sim.device)

    count = 0
    # Simulation loop
    while simulation_app.is_running():
        # reset every 500 steps
        if count % 500 == 0:
            # restore the default joint state and zero the applied torques
            default_joint_pos = robot.data.default_joint_pos.clone()
            default_joint_vel = robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(default_joint_pos, default_joint_vel)
            robot.set_joint_effort_target(zero_joint_efforts)  # zero torques in the initial step
            robot.write_data_to_sim()
            robot.reset()
            # reset contact sensor
            contact_forces.reset()
            # refresh the end-effector pose; the Jacobians are not updated to the
            # latest state at reset, so only the pose is taken here
            robot.update(sim_dt)
            _, _, _, ee_pose_b, _, _, _, _, _, _ = update_states(
                sim, scene, robot, ee_frame_idx, arm_joint_ids, contact_forces
            )
            command, ee_target_pose_b, ee_target_pose_w, current_goal_idx = update_target(
                sim, scene, osc, root_pose_w, ee_target_set, current_goal_idx
            )
            # set the osc command (expressed in the task frame)
            osc.reset()
            command, task_frame_pose_b = convert_to_task_frame(osc, command=command, ee_target_pose_b=ee_target_pose_b)
            osc.set_command(command=command, current_ee_pose_b=ee_pose_b, current_task_frame_pose_b=task_frame_pose_b)
        else:
            # get the updated states
            (
                jacobian_b,
                mass_matrix,
                gravity,
                ee_pose_b,
                ee_vel_b,
                root_pose_w,
                ee_pose_w,
                ee_force_b,
                joint_pos,
                joint_vel,
            ) = update_states(sim, scene, robot, ee_frame_idx, arm_joint_ids, contact_forces)
            # compute the joint commands
            joint_efforts = osc.compute(
                jacobian_b=jacobian_b,
                current_ee_pose_b=ee_pose_b,
                current_ee_vel_b=ee_vel_b,
                current_ee_force_b=ee_force_b,
                mass_matrix=mass_matrix,
                gravity=gravity,
                current_joint_pos=joint_pos,
                current_joint_vel=joint_vel,
                nullspace_joint_pos_target=joint_centers,
            )
            # apply actions
            robot.set_joint_effort_target(joint_efforts, joint_ids=arm_joint_ids)
            robot.write_data_to_sim()

        # update marker positions
        ee_marker.visualize(ee_pose_w[:, 0:3], ee_pose_w[:, 3:7])
        goal_marker.visualize(ee_target_pose_w[:, 0:3], ee_target_pose_w[:, 3:7])

        # perform step
        sim.step(render=True)
        # update robot buffers
        robot.update(sim_dt)
        # update buffers
        scene.update(sim_dt)
        # update sim-time
        count += 1
+ + Returns: + jacobian_b (torch.tensor): Jacobian in the body frame. + mass_matrix (torch.tensor): Mass matrix. + gravity (torch.tensor): Gravity vector. + ee_pose_b (torch.tensor): End-effector pose in the body frame. + ee_vel_b (torch.tensor): End-effector velocity in the body frame. + root_pose_w (torch.tensor): Root pose in the world frame. + ee_pose_w (torch.tensor): End-effector pose in the world frame. + ee_force_b (torch.tensor): End-effector force in the body frame. + joint_pos (torch.tensor): The joint positions. + joint_vel (torch.tensor): The joint velocities. + + Raises: + ValueError: Undefined target_type. + """ + # obtain dynamics related quantities from simulation + ee_jacobi_idx = ee_frame_idx - 1 + jacobian_w = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, arm_joint_ids] + mass_matrix = robot.root_physx_view.get_generalized_mass_matrices()[:, arm_joint_ids, :][:, :, arm_joint_ids] + gravity = robot.root_physx_view.get_gravity_compensation_forces()[:, arm_joint_ids] + # Convert the Jacobian from world to root frame + jacobian_b = jacobian_w.clone() + root_rot_matrix = matrix_from_quat(quat_inv(robot.data.root_quat_w)) + jacobian_b[:, :3, :] = torch.bmm(root_rot_matrix, jacobian_b[:, :3, :]) + jacobian_b[:, 3:, :] = torch.bmm(root_rot_matrix, jacobian_b[:, 3:, :]) + + # Compute current pose of the end-effector + root_pos_w = robot.data.root_pos_w + root_quat_w = robot.data.root_quat_w + ee_pos_w = robot.data.body_pos_w[:, ee_frame_idx] + ee_quat_w = robot.data.body_quat_w[:, ee_frame_idx] + ee_pos_b, ee_quat_b = subtract_frame_transforms(root_pos_w, root_quat_w, ee_pos_w, ee_quat_w) + root_pose_w = torch.cat([root_pos_w, root_quat_w], dim=-1) + ee_pose_w = torch.cat([ee_pos_w, ee_quat_w], dim=-1) + ee_pose_b = torch.cat([ee_pos_b, ee_quat_b], dim=-1) + + # Compute the current velocity of the end-effector + ee_vel_w = robot.data.body_vel_w[:, ee_frame_idx, :] # Extract end-effector velocity in the world frame + root_vel_w = 
robot.data.root_vel_w # Extract root velocity in the world frame + relative_vel_w = ee_vel_w - root_vel_w # Compute the relative velocity in the world frame + ee_lin_vel_b = quat_apply_inverse(robot.data.root_quat_w, relative_vel_w[:, 0:3]) # From world to root frame + ee_ang_vel_b = quat_apply_inverse(robot.data.root_quat_w, relative_vel_w[:, 3:6]) + ee_vel_b = torch.cat([ee_lin_vel_b, ee_ang_vel_b], dim=-1) + + # Calculate the contact force + ee_force_w = torch.zeros(scene.num_envs, 3, device=sim.device) + sim_dt = sim.get_physics_dt() + contact_forces.update(sim_dt) # update contact sensor + # Calculate the contact force by averaging over last four time steps (i.e., to smoothen) and + # taking the max of three surfaces as only one should be the contact of interest + ee_force_w, _ = torch.max(torch.mean(contact_forces.data.net_forces_w_history, dim=1), dim=1) + + # This is a simplification, only for the sake of testing. + ee_force_b = ee_force_w + + # Get joint positions and velocities + joint_pos = robot.data.joint_pos[:, arm_joint_ids] + joint_vel = robot.data.joint_vel[:, arm_joint_ids] + + return ( + jacobian_b, + mass_matrix, + gravity, + ee_pose_b, + ee_vel_b, + root_pose_w, + ee_pose_w, + ee_force_b, + joint_pos, + joint_vel, + ) + + +# Update the target commands +def update_target( + sim: sim_utils.SimulationContext, + scene: InteractiveScene, + osc: OperationalSpaceController, + root_pose_w: torch.tensor, + ee_target_set: torch.tensor, + current_goal_idx: int, +): + """Update the targets for the operational space controller. + + Args: + sim: (SimulationContext) Simulation context. + scene: (InteractiveScene) Interactive scene. + osc: (OperationalSpaceController) Operational space controller. + root_pose_w: (torch.tensor) Root pose in the world frame. + ee_target_set: (torch.tensor) End-effector target set. + current_goal_idx: (int) Current goal index. + + Returns: + command (torch.tensor): Updated target command. 
+ ee_target_pose_b (torch.tensor): Updated target pose in the body frame. + ee_target_pose_w (torch.tensor): Updated target pose in the world frame. + next_goal_idx (int): Next goal index. + + Raises: + ValueError: Undefined target_type. + """ + + # update the ee desired command + command = torch.zeros(scene.num_envs, osc.action_dim, device=sim.device) + command[:] = ee_target_set[current_goal_idx] + + # update the ee desired pose + ee_target_pose_b = torch.zeros(scene.num_envs, 7, device=sim.device) + for target_type in osc.cfg.target_types: + if target_type == "pose_abs": + ee_target_pose_b[:] = command[:, :7] + elif target_type == "wrench_abs": + pass # ee_target_pose_b could stay at the root frame for force control, what matters is ee_target_b + else: + raise ValueError("Undefined target_type within update_target().") + + # update the target desired pose in world frame (for marker) + ee_target_pos_w, ee_target_quat_w = combine_frame_transforms( + root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_target_pose_b[:, 0:3], ee_target_pose_b[:, 3:7] + ) + ee_target_pose_w = torch.cat([ee_target_pos_w, ee_target_quat_w], dim=-1) + + next_goal_idx = (current_goal_idx + 1) % len(ee_target_set) + + return command, ee_target_pose_b, ee_target_pose_w, next_goal_idx + + +# Convert the target commands to the task frame +def convert_to_task_frame(osc: OperationalSpaceController, command: torch.tensor, ee_target_pose_b: torch.tensor): + """Converts the target commands to the task frame. + + Args: + osc: OperationalSpaceController object. + command: Command to be converted. + ee_target_pose_b: Target pose in the body frame. + + Returns: + command (torch.tensor): Target command in the task frame. + task_frame_pose_b (torch.tensor): Target pose in the task frame. + + Raises: + ValueError: Undefined target_type. 
+ """ + command = command.clone() + task_frame_pose_b = ee_target_pose_b.clone() + + cmd_idx = 0 + for target_type in osc.cfg.target_types: + if target_type == "pose_abs": + command[:, :3], command[:, 3:7] = subtract_frame_transforms( + task_frame_pose_b[:, :3], task_frame_pose_b[:, 3:], command[:, :3], command[:, 3:7] + ) + cmd_idx += 7 + elif target_type == "wrench_abs": + # These are already defined in target frame for ee_goal_wrench_set_tilted_task (since it is + # easier), so not transforming + cmd_idx += 6 + else: + raise ValueError("Undefined target_type within _convert_to_task_frame().") + + return command, task_frame_pose_b + + +def main(): + """Main function.""" + # Load kit helper + sim_cfg = sim_utils.SimulationCfg(dt=0.01, device=args_cli.device) + sim = sim_utils.SimulationContext(sim_cfg) + # Set main camera + sim.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0]) + # Design scene + scene_cfg = SceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0) + scene = InteractiveScene(scene_cfg) + # Play the simulator + sim.reset() + # Now we are ready! + print("[INFO]: Setup complete...") + # Run the simulator + run_simulator(sim, scene) + + +if __name__ == "__main__": + # run the main function + main() + # close sim app + simulation_app.close()