diff --git a/.vscode/.gitignore b/.vscode/.gitignore index 236a8546..736457bd 100644 --- a/.vscode/.gitignore +++ b/.vscode/.gitignore @@ -4,6 +4,7 @@ # Note: These files are kept for development purposes only. !tools/settings.template.json +!tools/launch.template.json !tools/setup_vscode.py !extensions.json !launch.json diff --git a/.vscode/tools/launch.template.json b/.vscode/tools/launch.template.json new file mode 100644 index 00000000..a44f114c --- /dev/null +++ b/.vscode/tools/launch.template.json @@ -0,0 +1,57 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "name": "Python: Current File", + "type": "python", + "request": "launch", + "program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Python: Attach (windows-x86_64/linux-x86_64)", + "type": "python", + "request": "attach", + "port": 3000, + "host": "localhost" + }, + { + "name": "Python: Train Environment", + "type": "python", + "request": "launch", + "args" : ["--task", "Isaac-Reach-Franka-v0", "--headless"], + "program": "${workspaceFolder}/scripts/reinforcement_learning/rsl_rl/train.py", + "console": "integratedTerminal" + }, + { + "name": "Python: Play Environment", + "type": "python", + "request": "launch", + "args" : ["--task", "Isaac-Reach-Franka-v0", "--num_envs", "32"], + "program": "${workspaceFolder}/scripts/reinforcement_learning/rsl_rl/play.py", + "console": "integratedTerminal" + }, + { + "name": "Python: SinglePytest", + "type": "python", + "request": "launch", + "module": "pytest", + "args": [ + "${file}" + ], + "console": "integratedTerminal" + }, + { + "name": "Python: ALL Pytest", + "type": "python", + "request": "launch", + "module": "pytest", + "args": ["source/isaaclab/test"], + "console": "integratedTerminal", + "justMyCode": false + } + ] +} diff --git 
a/docs/source/_static/publications/omnireset/camera_setup.jpg b/docs/source/_static/publications/omnireset/camera_setup.jpg new file mode 100644 index 00000000..76a3686e Binary files /dev/null and b/docs/source/_static/publications/omnireset/camera_setup.jpg differ diff --git a/docs/source/_static/publications/omnireset/cube_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/cube_success_rate_seeds.jpg new file mode 100644 index 00000000..372346cd Binary files /dev/null and b/docs/source/_static/publications/omnireset/cube_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/cube_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/cube_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..a225c660 Binary files /dev/null and b/docs/source/_static/publications/omnireset/cube_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds.jpg new file mode 100644 index 00000000..92504c9c Binary files /dev/null and b/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..d2399185 Binary files /dev/null and b/docs/source/_static/publications/omnireset/cupcake_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/distill_drawer_losses.jpg b/docs/source/_static/publications/omnireset/distill_drawer_losses.jpg new file mode 100644 index 00000000..65118576 Binary files /dev/null and b/docs/source/_static/publications/omnireset/distill_drawer_losses.jpg differ diff --git a/docs/source/_static/publications/omnireset/distill_leg_losses.jpg 
b/docs/source/_static/publications/omnireset/distill_leg_losses.jpg new file mode 100644 index 00000000..4c971453 Binary files /dev/null and b/docs/source/_static/publications/omnireset/distill_leg_losses.jpg differ diff --git a/docs/source/_static/publications/omnireset/distill_peg_losses.jpg b/docs/source/_static/publications/omnireset/distill_peg_losses.jpg new file mode 100644 index 00000000..d9693ed2 Binary files /dev/null and b/docs/source/_static/publications/omnireset/distill_peg_losses.jpg differ diff --git a/docs/source/_static/publications/omnireset/drawer_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/drawer_success_rate_seeds.jpg new file mode 100644 index 00000000..6eba60d5 Binary files /dev/null and b/docs/source/_static/publications/omnireset/drawer_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/drawer_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/drawer_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..3c3970fe Binary files /dev/null and b/docs/source/_static/publications/omnireset/drawer_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/example_blend_front_camera.png b/docs/source/_static/publications/omnireset/example_blend_front_camera.png new file mode 100644 index 00000000..b00054a3 Binary files /dev/null and b/docs/source/_static/publications/omnireset/example_blend_front_camera.png differ diff --git a/docs/source/_static/publications/omnireset/example_blend_side_camera.png b/docs/source/_static/publications/omnireset/example_blend_side_camera.png new file mode 100644 index 00000000..19c308f1 Binary files /dev/null and b/docs/source/_static/publications/omnireset/example_blend_side_camera.png differ diff --git a/docs/source/_static/publications/omnireset/example_blend_wrist_camera.png b/docs/source/_static/publications/omnireset/example_blend_wrist_camera.png new file mode 100644 index 
00000000..6585d516 Binary files /dev/null and b/docs/source/_static/publications/omnireset/example_blend_wrist_camera.png differ diff --git a/docs/source/_static/publications/omnireset/example_calibration.jpg b/docs/source/_static/publications/omnireset/example_calibration.jpg new file mode 100644 index 00000000..9330daa9 Binary files /dev/null and b/docs/source/_static/publications/omnireset/example_calibration.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds.jpg b/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds.jpg new file mode 100644 index 00000000..759b8e67 Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds_walltime.jpg new file mode 100644 index 00000000..aacc4155 Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_drawer_curriculum_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds.jpg b/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds.jpg new file mode 100644 index 00000000..02d96f67 Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds_walltime.jpg new file mode 100644 index 00000000..0bc1f85e Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_leg_curriculum_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds.jpg b/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds.jpg new file mode 
100644 index 00000000..6f4527c6 Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds_walltime.jpg new file mode 100644 index 00000000..fa088a80 Binary files /dev/null and b/docs/source/_static/publications/omnireset/finetune_peg_curriculum_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/leg_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/leg_success_rate_seeds.jpg new file mode 100644 index 00000000..f7a982bb Binary files /dev/null and b/docs/source/_static/publications/omnireset/leg_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/leg_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/leg_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..c2adbf3e Binary files /dev/null and b/docs/source/_static/publications/omnireset/leg_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/marker_6x6_150mm_id12.pdf b/docs/source/_static/publications/omnireset/marker_6x6_150mm_id12.pdf new file mode 100644 index 00000000..8f1a0604 Binary files /dev/null and b/docs/source/_static/publications/omnireset/marker_6x6_150mm_id12.pdf differ diff --git a/docs/source/_static/publications/omnireset/peg_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/peg_success_rate_seeds.jpg new file mode 100644 index 00000000..d7f321ac Binary files /dev/null and b/docs/source/_static/publications/omnireset/peg_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/peg_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/peg_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..83a20a47 Binary files /dev/null and 
b/docs/source/_static/publications/omnireset/peg_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds.jpg b/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds.jpg new file mode 100644 index 00000000..ff2d237a Binary files /dev/null and b/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds.jpg differ diff --git a/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds_walltime.jpg b/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds_walltime.jpg new file mode 100644 index 00000000..a7c7c0e1 Binary files /dev/null and b/docs/source/_static/publications/omnireset/rectangle_success_rate_seeds_walltime.jpg differ diff --git a/docs/source/_static/publications/omnireset/success_rate_over_steps.jpg b/docs/source/_static/publications/omnireset/success_rate_over_steps.jpg deleted file mode 100644 index ad7e11bb..00000000 Binary files a/docs/source/_static/publications/omnireset/success_rate_over_steps.jpg and /dev/null differ diff --git a/docs/source/_static/publications/omnireset/success_rate_over_wall_clock.jpg b/docs/source/_static/publications/omnireset/success_rate_over_wall_clock.jpg deleted file mode 100644 index 8d87ac92..00000000 Binary files a/docs/source/_static/publications/omnireset/success_rate_over_wall_clock.jpg and /dev/null differ diff --git a/docs/source/_static/publications/omnireset/sysid_fit.jpg b/docs/source/_static/publications/omnireset/sysid_fit.jpg new file mode 100644 index 00000000..74d8c57d Binary files /dev/null and b/docs/source/_static/publications/omnireset/sysid_fit.jpg differ diff --git a/docs/source/overview/uw_environments.rst b/docs/source/overview/uw_environments.rst index 5637cf2c..c6eaefbf 100644 --- a/docs/source/overview/uw_environments.rst +++ b/docs/source/overview/uw_environments.rst @@ -83,12 +83,12 @@ Environments based on fixed-arm manipulation tasks. .. 
|ext-nut-thread-franka-link| replace:: `UW-Nut-Thread-Franka-v0 `__ .. |ext-gear-mesh-franka-link| replace:: `UW-Gear-Mesh-Franka-v0 `__ .. |ext-peg-insert-franka-link| replace:: `UW-Peg-Insert-Franka-v0 `__ -.. |omnireset-ur5e-drawer-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ -.. |omnireset-ur5e-fbleg-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ -.. |omnireset-ur5e-peg-insert-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ -.. |omnireset-ur5e-rectangle-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ -.. |omnireset-ur5e-cupcake-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ -.. |omnireset-ur5e-cube-stack-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-drawer-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-fbleg-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-peg-insert-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-rectangle-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-cupcake-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ +.. |omnireset-ur5e-cube-stack-link| replace:: `OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 `__ Locomotion diff --git a/docs/source/publications/omnireset/distillation.rst b/docs/source/publications/omnireset/distillation.rst new file mode 100644 index 00000000..9b228ef8 --- /dev/null +++ b/docs/source/publications/omnireset/distillation.rst @@ -0,0 +1,367 @@ +Distillation & Deployment +========================= + +This guide covers distilling a state-based RL expert into a vision-based policy, evaluating it in simulation, and deploying on a real robot. + +.. 
_distillation-install: + +One-Time Setup +-------------- + +Evaluation and training use the `diffusion_policy `_ repo (``omnireset`` branch). Clone it as a sibling to UWLab. If you already cloned ``diffusion_policy`` for :doc:`sim2real`, skip the clone step. + +.. code:: text + + / + UWLab/ + diffusion_policy/ + +.. code:: bash + + cd + git clone -b omnireset https://github.com/WEIRDLabUW/diffusion_policy.git + +Then install the dependencies into your UWLab conda environment (required even if you already cloned above): + +.. code:: bash + + cd /diffusion_policy + conda activate env_uwlab + python -m pip install -e . + python -m pip install dill hydra-core omegaconf zarr einops "diffusers<0.37" wandb accelerate + +---- + +Quick Start: Evaluate Pretrained RGB Policies +---------------------------------------------- + +Download our pretrained vision policy checkpoints and evaluate immediately. All commands in this section run in ``env_uwlab`` from the UWLab directory. + +.. tab-set:: + + .. tab-item:: Peg Insertion + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/distilled_rgb_policies/peg_distilled_rgb.ckpt + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint peg_distilled_rgb.ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Leg Twisting + + .. 
code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/distilled_rgb_policies/leg_distilled_rgb.ckpt + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint leg_distilled_rgb.ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Drawer Assembly + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/distilled_rgb_policies/drawer_distilled_rgb.ckpt + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint drawer_distilled_rgb.ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + +---- + +Train Your Own +-------------- + +To train your own vision policy from scratch, follow the steps below. + +.. important:: + + **Prerequisites:** If doing sim2real transfer, complete :doc:`sim2real` (system identification and RL finetuning) before collecting demonstrations. The RGB tasks expect a **Stage 2** (finetuned) expert checkpoint. + +Collect Demonstrations +^^^^^^^^^^^^^^^^^^^^^^ + +**Step 1 — Export the expert policy** + +Run ``play.py`` on a **Stage 2** (finetuned) checkpoint to export a JIT-traced ``policy.pt``. You can finetune your own (see :doc:`sim2real`) or download a pre-finetuned checkpoint from the :ref:`finetuned checkpoints ` section. + +.. 
code:: bash + + conda activate env_uwlab + cd /UWLab + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 4 \ + --checkpoint \ + --headless + +This saves ``policy.pt`` (and ``policy.onnx``) under ``/exported/``. + +**Step 2 — Collect RGB demonstrations** + +Use the exported ``policy.pt`` to roll out the expert and record RGB observations in Zarr format. Only successful trajectories are saved. + +.. tip:: + + For **sim2real deployment**, collect **80K+ demos**. For **sim-only distillation** (performance evaluation), **10K** is sufficient. + 10K demos take ~2 hours on a 3090 GPU. 32 envs fit on 24 GB VRAM. + +.. tab-set:: + + .. tab-item:: Peg Insertion + + .. code:: bash + + python scripts_v2/tools/collect_demos.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-DataCollection-v0 \ + --dataset_file datasets/peg/rgb0.zarr \ + --num_envs 32 \ + --num_demos 10000 \ + --enable_cameras \ + --headless \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole \ + agent.algorithm.offline_algorithm_cfg.behavior_cloning_cfg.experts_path='["exported/policy.pt"]' + + .. tab-item:: Leg Twisting + + .. code:: bash + + python scripts_v2/tools/collect_demos.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-DataCollection-v0 \ + --dataset_file datasets/leg/rgb0.zarr \ + --num_envs 32 \ + --num_demos 10000 \ + --enable_cameras \ + --headless \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop \ + agent.algorithm.offline_algorithm_cfg.behavior_cloning_cfg.experts_path='["exported/policy.pt"]' + + .. tab-item:: Drawer Assembly + + ..
code:: bash + + python scripts_v2/tools/collect_demos.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-DataCollection-v0 \ + --dataset_file datasets/drawer/rgb0.zarr \ + --num_envs 32 \ + --num_demos 10000 \ + --enable_cameras \ + --headless \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox \ + agent.algorithm.offline_algorithm_cfg.behavior_cloning_cfg.experts_path='["exported/policy.pt"]' + +---- + +Train Vision Policy +^^^^^^^^^^^^^^^^^^^ + +Train a ResNet18-MLP Gaussian policy using the collected Zarr dataset with the `diffusion_policy `_ repo (use the ``omnireset`` branch). + +Training requires the ``robodiff`` conda environment (separate from ``env_uwlab``). Create it once: + +.. code:: bash + + cd /diffusion_policy + mamba env create -f conda_environment.yaml # or: conda env create -f conda_environment.yaml + +Then activate it and run training: + +.. code:: bash + + conda activate robodiff + cd /diffusion_policy + python train.py \ + --config-name train_mlp_sim2real_image_with_aux_loss_workspace.yaml \ + --config-dir diffusion_policy/config \ + task.dataset.dataset_dir= + +``dataset_dir`` is a folder containing one or more Zarr files produced by the data collection step: + +.. code:: text + + dataset_dir/ + rgb0.zarr + rgb1.zarr + +Multiple Zarr files are merged automatically, so you can split collection across runs. + +.. tip:: + + For sim2real, train for **350K iterations** (~2 days on a single H200). Sim performance should start being reasonable within ~1 day of training. + +**Training Curves** + +.. list-table:: + :widths: 33 33 33 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/distill_peg_losses.jpg + :width: 100% + :alt: Peg insertion distillation training curves + + Peg Insertion + + - .. figure:: ../../../source/_static/publications/omnireset/distill_leg_losses.jpg + :width: 100% + :alt: Leg twisting distillation training curves + + Leg Twisting + + - .. 
figure:: ../../../source/_static/publications/omnireset/distill_drawer_losses.jpg + :width: 100% + :alt: Drawer assembly distillation training curves + + Drawer Assembly + +---- + +Evaluate in Simulation +^^^^^^^^^^^^^^^^^^^^^^ + +Evaluate the trained vision policy in simulation. All commands below run in ``env_uwlab`` from the UWLab directory. + +.. tab-set:: + + .. tab-item:: Peg Insertion + + **In-distribution:** + + .. code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + **Out-of-distribution (OOD)** lighting and textures: + + .. code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-OOD-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Leg Twisting + + **In-distribution:** + + .. code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + **Out-of-distribution (OOD)** lighting and textures: + + .. code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-OOD-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Drawer Assembly + + **In-distribution:** + + .. 
code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + --save_video \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + **Out-of-distribution (OOD)** lighting and textures: + + .. code:: bash + + python scripts_v2/tools/eval_distilled_policy.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-OOD-Play-v0 \ + --checkpoint .ckpt \ + --num_envs 32 \ + --num_trajectories 100 \ + --headless \ + --enable_cameras \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + +The OOD task applies domain randomization to visual observations to test robustness before real deployment. + +---- + +.. _deploy-on-real-robot: + +Deploy on Real Robot +-------------------- + +After training a vision policy (or using a pretrained checkpoint above): + +1. Ensure your cameras are physically mounted to match the calibrated poses (see :ref:`camera-calibration-section` in the Sim2Real guide). +2. Copy the checkpoint to the real-robot machine. +3. Run the evaluation script from the `diffusion_policy `_ repo (``omnireset`` branch): + +.. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python eval_real_robot.py \ + --input .ckpt \ + --output ./demo \ + --robot_ip 192.168.1.10 \ + -j diff --git a/docs/source/publications/omnireset/index.rst b/docs/source/publications/omnireset/index.rst index 5b2e1e24..7dc3f96d 100644 --- a/docs/source/publications/omnireset/index.rst +++ b/docs/source/publications/omnireset/index.rst @@ -1,10 +1,8 @@ OmniReset ========= -**OmniReset** is a robotic manipulation framework using RL to solve dexterous, contact-rich manipulation tasks without reward engineering or demos. - -.. note:: - Detailed documentation will be updated following the public release of the paper. 
+| **Paper:** `Emergent Dexterity via Diverse Resets and Large-Scale Reinforcement Learning (ICLR 2026) `_ +| **Project website:** `omnireset.github.io `_ ---- @@ -27,23 +25,51 @@ Download our pretrained checkpoint and run evaluation.
- .. code:: bash + .. tab-set:: - # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/fbleg_state_rl_expert.pt + .. tab-item:: Seed 42 - # Run evaluation - python scripts/reinforcement_learning/rsl_rl/play.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ - --num_envs 1 \ - --checkpoint fbleg_state_rl_expert.pt \ - env.scene.insertive_object=fbleg \ - env.scene.receptive_object=fbtabletop + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/leg_state_rl_expert_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_seed42.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Seed 0 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/leg_state_rl_expert_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_seed0.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/leg_state_rl_expert_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_seed1.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop .. tab-item:: Drawer Assembly @@ -51,23 +77,51 @@ Download our pretrained checkpoint and run evaluation.
- .. code:: bash + .. tab-set:: - # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/fbdrawerbottom_state_rl_expert.pt + .. tab-item:: Seed 42 - # Run evaluation - python scripts/reinforcement_learning/rsl_rl/play.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ - --num_envs 1 \ - --checkpoint fbdrawerbottom_state_rl_expert.pt \ - env.scene.insertive_object=fbdrawerbottom \ - env.scene.receptive_object=fbdrawerbox + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/drawer_state_rl_expert_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_seed42.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Seed 0 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/drawer_state_rl_expert_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_seed0.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/drawer_state_rl_expert_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_seed1.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox .. tab-item:: Peg Insertion @@ -75,23 +129,51 @@ Download our pretrained checkpoint and run evaluation.
- .. code:: bash + .. tab-set:: - # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/peg_state_rl_expert.pt + .. tab-item:: Seed 42 - # Run evaluation - python scripts/reinforcement_learning/rsl_rl/play.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ - --num_envs 1 \ - --checkpoint peg_state_rl_expert.pt \ - env.scene.insertive_object=peg \ - env.scene.receptive_object=peghole + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/peg_state_rl_expert_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_seed42.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Seed 0 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/peg_state_rl_expert_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_seed0.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/peg_state_rl_expert_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_seed1.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole .. tab-item:: Rectangle on Wall @@ -99,7 +181,7 @@ Download our pretrained checkpoint and run evaluation.
@@ -107,13 +189,13 @@ Download our pretrained checkpoint and run evaluation. .. code:: bash # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/rectangle_state_rl_expert.pt + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/rectangle_state_rl_expert_seed0.pt # Run evaluation python scripts/reinforcement_learning/rsl_rl/play.py \ --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ --num_envs 1 \ - --checkpoint rectangle_state_rl_expert.pt \ + --checkpoint rectangle_state_rl_expert_seed0.pt \ env.scene.insertive_object=rectangle \ env.scene.receptive_object=wall @@ -123,7 +205,7 @@ Download our pretrained checkpoint and run evaluation.
@@ -131,13 +213,13 @@ Download our pretrained checkpoint and run evaluation. .. code:: bash # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/cube_state_rl_expert.pt + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/cube_state_rl_expert_seed42.pt # Run evaluation python scripts/reinforcement_learning/rsl_rl/play.py \ --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ --num_envs 1 \ - --checkpoint cube_state_rl_expert.pt \ + --checkpoint cube_state_rl_expert_seed42.pt \ env.scene.insertive_object=cube \ env.scene.receptive_object=cube @@ -147,7 +229,7 @@ Download our pretrained checkpoint and run evaluation.
@@ -155,447 +237,98 @@ Download our pretrained checkpoint and run evaluation. .. code:: bash # Download checkpoint - wget https://s3.us-west-004.backblazeb2.com/uwlab-assets/Policies/OmniReset/cupcake_state_rl_expert.pt + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts/cupcake_state_rl_expert_seed42.pt # Run evaluation python scripts/reinforcement_learning/rsl_rl/play.py \ --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ --num_envs 1 \ - --checkpoint cupcake_state_rl_expert.pt \ + --checkpoint cupcake_state_rl_expert_seed42.pt \ env.scene.insertive_object=cupcake \ env.scene.receptive_object=plate ---- -.. _reproduce-training: - -Reproduce Our Training ----------------------- - -Reproduce our training results from scratch. +.. _full-pipeline: + +Full Pipeline +------------- + +The full OmniReset pipeline from custom task creation to real-robot deployment: + +.. raw:: html + +
+
+ 1. Create New Task
assets & variants +
+
+
+ 2. Train RL Policy
resets & training
★ most users start here +
+
+
+ 3. Sys-ID & Finetune
sim2real alignment +
+
+
+ 4. Distill & Deploy
vision policy & real robot +
+
.. tip:: - **Want to try it quickly?** Start with **Cube Stacking** or **Peg Insertion**. They have the fastest reset state collection times and converge within ~8 hours on 4×L40S GPUs. - -.. tab-set:: - - .. tab-item:: Leg Twisting - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. code:: bash - - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=fbleg - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. 
code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop - - **Step 4: Train RL Policy** - - .. 
important:: - - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=fbleg \ - env.scene.receptive_object=fbtabletop - - .. tab-item:: Drawer Assembly - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. code:: bash - - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=fbdrawerbottom - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. 
code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox - - **Step 4: Train RL Policy** - - .. 
important:: - - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=fbdrawerbottom \ - env.scene.receptive_object=fbdrawerbox - - .. tab-item:: Peg Insertion - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=peg env.scene.receptive_object=peghole - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. code:: bash - - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=peg - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. 
code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=peg env.scene.receptive_object=peghole - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=peg env.scene.receptive_object=peghole - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=peg env.scene.receptive_object=peghole - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=peg env.scene.receptive_object=peghole - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=peg env.scene.receptive_object=peghole - - **Step 4: Train RL Policy** - - .. important:: - - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. 
code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=peg \ - env.scene.receptive_object=peghole - - .. tab-item:: Rectangle on Wall - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=rectangle env.scene.receptive_object=wall - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. code:: bash - - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=rectangle - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. 
code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=rectangle env.scene.receptive_object=wall - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=rectangle env.scene.receptive_object=wall - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=rectangle env.scene.receptive_object=wall - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=rectangle env.scene.receptive_object=wall - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=rectangle env.scene.receptive_object=wall + **Most users only need step 2.** If you're training on one of our 6 existing tasks, jump straight to :doc:`rl_training`. 
- **Step 4: Train RL Policy** +- :doc:`new_task` -- Prepare USD assets, register object variants, verify in sim. +- :doc:`rl_training` -- Collect reset states and train an RL policy from scratch. **Start here for most use cases.** +- :doc:`sim2real` -- Robot calibration & USD, system identification, camera calibration, then ADR finetuning, or use our pre-finetuned checkpoints. +- :doc:`distillation` -- Evaluate pretrained RGB checkpoints, or collect demos and train your own ResNet18-MLP vision policy. Deploy on real robot. - .. important:: +.. toctree:: + :maxdepth: 1 + :caption: Pipeline - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=rectangle \ - env.scene.receptive_object=wall - - .. tab-item:: Cube Stacking - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=cube env.scene.receptive_object=cube - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. 
code:: bash + new_task + rl_training + sim2real + distillation - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=cube - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=cube env.scene.receptive_object=cube - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=cube env.scene.receptive_object=cube - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=cube env.scene.receptive_object=cube - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=cube env.scene.receptive_object=cube - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to 
verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=cube env.scene.receptive_object=cube - - **Step 4: Train RL Policy** - - .. important:: - - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=cube \ - env.scene.receptive_object=cube - - .. tab-item:: Cupcake on Plate - - .. note:: - - **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. - - **Step 1: Collect Partial Assemblies** (~30 seconds) - - .. code:: bash - - python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --dataset_dir ./partial_assembly_datasets --headless env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - **Step 2: Sample Grasp Poses** (~1 minute) - - .. code:: bash - - python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --dataset_dir ./grasp_datasets --headless env.scene.object=cupcake - - **Step 3: Generate Reset State Datasets** (~1 min to 1 hour depending on the reset) - - .. important:: - - Before running, make sure ``base_path`` and ``base_paths`` in ``reset_states_cfg.py`` point to your dataset directories. - - .. 
code:: bash - - # Object Anywhere, End-Effector Anywhere (Reaching) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEAnywhere env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - # Object Resting, End-Effector Grasped (Near Object) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectRestingEEGrasped env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - # Object Anywhere, End-Effector Grasped (Grasped) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectAnywhereEEGrasped env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - # Object Partially Assembled, End-Effector Grasped (Near Goal) - python scripts_v2/tools/record_reset_states.py --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 --num_envs 4096 --num_reset_states 10000 --headless --dataset_dir ./reset_state_datasets/ObjectPartiallyAssembledEEGrasped env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - **Step 3.5: Visualize Reset States (Optional)** - - Visualize the generated reset states to verify they are correct. - - .. code:: bash - - python scripts_v2/tools/visualize_reset_states.py --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 --num_envs 4 --dataset_dir ./reset_state_datasets env.scene.insertive_object=cupcake env.scene.receptive_object=plate - - **Step 4: Train RL Policy** - - .. 
important:: - - If you generated your own datasets in Steps 1-3, make sure to update ``base_paths`` in ``rl_state_cfg.py`` to point to your dataset directories. - - .. code:: bash - - python -m torch.distributed.run \ - --nnodes 1 \ - --nproc_per_node 4 \ - scripts/reinforcement_learning/rsl_rl/train.py \ - --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ - --num_envs 16384 \ - --logger wandb \ - --headless \ - --distributed \ - env.scene.insertive_object=cupcake \ - env.scene.receptive_object=plate - -Training Curves -^^^^^^^^^^^^^^^ +---- -Below are success rate curves for each task plotting over number of training iterations and wall clock time when training on 4xL40S GPUs. -Insertion, twisting, cube stacking, and rectangle orientation on wall tasks converge within **8 hours**, while drawer assembly and cupcake on plate tasks take **1 day**. +Compute & Hardware Requirements +------------------------------- .. list-table:: - :widths: 50 50 - :class: borderless - - * - .. figure:: ../../../source/_static/publications/omnireset/success_rate_over_steps.jpg - :width: 100% - :alt: Training curve over steps - - Success Rate of 6 Tasks Over Number of Training Iterations - - - .. figure:: ../../../source/_static/publications/omnireset/success_rate_over_wall_clock.jpg - :width: 100% - :alt: Training curve over wall clock time - - Success Rate of 6 Tasks Over Wall Clock Time + :header-rows: 1 + :widths: 30 70 + + * - Stage + - Requirements + * - Policy evaluation + - 1 GPU. + * - RL training + - 4 GPUs, 24+ GB VRAM each (e.g. L40S, 4090). Cube/Peg converge in ~8 hours on 4x L40S. + * - RL finetuning + - 1--4 GPUs depending on task (see :doc:`sim2real` for per-task env counts). Peg converges in ~8 hours on 1x L40S. + * - Demo collection + - 1 RTX GPU, 24+ GB VRAM (32 envs fit on an RTX 4090). 10K demos ~2 hours. + * - Vision policy training + - 1 GPU. ~2 days of training on a H200 for transfer. ~1 day of training on a H200 for sim-only distillation. 
+ * - Real-robot deploy + - UR5e/UR7e + Robotiq 2F-85 + 3x Intel RealSense (D415/D435/D455). ---- + +BibTeX +------ +.. code:: bibtex + + @inproceedings{ + yin2026omnireset, + title={Emergent Dexterity via Diverse Resets and Large-Scale Reinforcement Learning}, + author={Patrick Yin and Tyler Westenbroek and Zhengyu Zhang and Joshua Tran and Ignacio Dagnino and Eeshani Shilamkar and Numfor Mbiziwo-Tiapo and Simran Bagaria and Xinlei Liu and Galen Mullins and Andrey Kolobov and Abhishek Gupta}, + booktitle={The Fourteenth International Conference on Learning Representations}, + year={2026}, + url={https://arxiv.org/abs/2603.15789} + } diff --git a/docs/source/publications/omnireset/new_task.rst b/docs/source/publications/omnireset/new_task.rst new file mode 100644 index 00000000..7fcf6389 --- /dev/null +++ b/docs/source/publications/omnireset/new_task.rst @@ -0,0 +1,211 @@ +Create a New Task +================= + +This guide walks through adding a custom object pair (insertive + receptive) so you can train OmniReset policies on your own tasks. + +---- + +Step 1: Prepare Meshes in Blender +---------------------------------- + +All mesh-level properties must be set in Blender before export. Isaac Sim does not modify mesh geometry on import, so scale and orientation must be baked into the vertices here. + +In a single Blender session: + +1. Rescale objects to real-world dimensions (meters). ``Ctrl+A`` > Rotation & Scale to bake transforms. +2. Reorient so Z-axis points up when the object is resting on a table: ``Tab`` > Edit Mode, ``A`` to select all, rotate as needed (e.g. ``R X 90``). +3. Set origin: right-click > Set Origin > Origin to Center of Mass (Volume). +4. Place both objects in assembled pose, record the relative transform for ``assembled_pose`` to be used in Step 4. +5. Export each object as ``.usdz``. + +.. raw:: html + +
+ +
+ +---- + +Step 2: Set Up USD in Isaac Sim +-------------------------------- + +Import the exported mesh into Isaac Sim and restructure it into the standard asset format: + +1. Import the ``.usdz`` into a new USD stage. +2. Separate the mesh into a visual mesh (with materials) and a collision mesh (invisible, with SDF physics collider). +3. Save as ``.usd``. + +.. tip:: + + If objects appear in the wrong place in Step 6, try adding the Rigid Body component to the root prim, setting the SDF collider only on the collision mesh, and removing physics from the visual mesh. + +.. raw:: html + +
+ +
+ +---- + +Step 3: Compute Bottom Offset +------------------------------- + +The bottom offset is the distance from the object's origin to its lowest point, used for spawning objects flush on the table. Run on each ``.usd`` from Step 2: + +.. code:: bash + + python scripts_v2/tools/compute_bottom_offset.py /path/to/object.usd + +Example output:: + + bottom_offset: 0.056658 + +Record these values for Step 4. + +---- + +Step 4: Create Metadata +------------------------- + +Create a ``metadata.yaml`` file **in the same folder** as each ``.usd``: + +.. code:: text + + My_Insertive_Object/ + my_insertive_object.usd + metadata.yaml + + My_Receptive_Object/ + my_receptive_object.usd + metadata.yaml + +The metadata has the following fields: + +- ``assembled_offset``: Transform from the insertive object to this object in the assembled pose. Always identity for the insertive object; for the receptive object, use the relative transform recorded in Step 1. +- ``bottom_offset``: Transform from origin to the bottom of the object. The Z value is the **negative** of the script output from Step 3. +- ``success_thresholds`` (receptive only): How tightly the policy must align parts. Use ``position: 0.0025, orientation: 0.025`` for tight-fit tasks (e.g. screw insertion). For looser tasks (e.g. cube stacking), try ``position: 0.005, orientation: 0.05``. May need to tune depending on the task. + +**Insertive object** example: + +.. code:: yaml + + assembled_offset: + pos: [0.0, 0.0, 0.0] + quat: [1.0, 0.0, 0.0, 0.0] + bottom_offset: + pos: [0.0, 0.0, -0.056658] + quat: [1.0, 0.0, 0.0, 0.0] + +**Receptive object** example: + +.. 
code:: yaml + + assembled_offset: + pos: [0.012, 0.0, 0.035] + quat: [1.0, 0.0, 0.0, 0.0] + bottom_offset: + pos: [0.0, 0.0, -0.010169] + quat: [1.0, 0.0, 0.0, 0.0] + success_thresholds: + position: 0.0025 + orientation: 0.025 + +---- + +Step 5: Register Object Variants +---------------------------------- + +Add your objects to the ``variants`` dictionary in **4 config files**: + +.. list-table:: + :header-rows: 1 + :widths: 50 50 + + * - Config File + - Purpose + * - ``partial_assemblies_cfg.py`` + - Partial assembly collection + * - ``grasp_sampling_cfg.py`` + - Grasp pose sampling + * - ``reset_states_cfg.py`` + - Reset state generation + * - ``rl_state_cfg.py`` + - RL training & evaluation + + +All files live under: + +.. code:: text + + source/uwlab_tasks/.../omnireset/config/ur5e_robotiq_2f85/ + +Add to ``variants["scene.insertive_object"]``: + +.. code:: python + + "my_insertive_object": make_insertive_object( + "/absolute/path/to/my_insertive_object.usd" + ), + +Add to ``variants["scene.receptive_object"]``: + +.. code:: python + + "my_receptive_object": make_receptive_object( + "/absolute/path/to/my_receptive_object.usd" + ), + +.. tip:: + + Use local absolute paths during development. Switch to ``UWLAB_CLOUD_ASSETS_DIR`` when sharing. + +---- + +Step 6: Verify Setup +---------------------- + +**Check assembled pose offset** by running partial assemblies: + +.. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py \ + --task OmniReset-PartialAssemblies-v0 \ + --num_envs 10 --num_trajectories 10 --headless \ + env.scene.insertive_object=my_insertive_object env.scene.receptive_object=my_receptive_object + +If objects are misaligned or upside down, revisit Step 1. + +**Check bottom offset** by generating a small set of reset states and visualizing: + +.. 
code:: bash + + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4 --num_reset_states 8 --headless \ + env.scene.insertive_object=my_insertive_object env.scene.receptive_object=my_receptive_object + +.. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=my_insertive_object env.scene.receptive_object=my_receptive_object + +Confirm the receptive object sits flush on the table. If it's floating or clipping, adjust the ``bottom_offset`` in Step 4. + +Once everything looks correct, proceed to :doc:`rl_training` to generate full reset states and train. + +---- + +Known Limitations +------------------ + +**Grasp sampling.** The grasp sampler does not always produce valid grasps for adversarial or unusually shaped objects. You may need to tune sampling parameters in the grasp sampling config. Additionally, the collision checker may be inaccurate when operating at mm-level precision, so ``min_dist`` may need task-specific tuning at the moment. + +We are actively working on removing the grasp sampling requirement from OmniReset entirely, which will make the pipeline more general and also eliminate the need for mm-level collision checking. diff --git a/docs/source/publications/omnireset/rl_training.rst b/docs/source/publications/omnireset/rl_training.rst new file mode 100644 index 00000000..0675f50a --- /dev/null +++ b/docs/source/publications/omnireset/rl_training.rst @@ -0,0 +1,956 @@ +.. _reproduce-training: + +Collect Resets & Train RL Policy +================================ + +Reproduce our training results from scratch. + +.. tip:: + + **Want to try it quickly?** Start with **Cube Stacking** or **Peg Insertion**. They have the fastest reset state collection times and converge within ~8 hours on 4×L40S GPUs. + +.. tab-set:: + + .. 
tab-item:: Leg Twisting + + .. note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=fbleg + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop \ + 
env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. + + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + .. tab-item:: Grasped + + .. 
code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + .. tab-item:: Near Goal + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=fbleg env.scene.receptive_object=fbtabletop + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/leg_success_rate_seeds.jpg + :width: 100% + :alt: Leg twisting success rate over steps + + - .. 
figure:: ../../../source/_static/publications/omnireset/leg_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Leg twisting success rate over wall clock time + + .. tab-item:: Drawer Assembly + + .. note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=fbdrawerbottom + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. 
code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. 
+ + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Grasped + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Near Goal + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=fbdrawerbottom env.scene.receptive_object=fbdrawerbox + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. 
code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/drawer_success_rate_seeds.jpg + :width: 100% + :alt: Drawer assembly success rate over steps + + - .. figure:: ../../../source/_static/publications/omnireset/drawer_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Drawer assembly success rate over wall clock time + + .. tab-item:: Peg Insertion + + .. note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=peg env.scene.receptive_object=peghole + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. 
code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=peg + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + 
env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. + + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + .. tab-item:: Grasped + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + .. tab-item:: Near Goal + + .. 
code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=peg env.scene.receptive_object=peghole + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/peg_success_rate_seeds.jpg + :width: 100% + :alt: Peg insertion success rate over steps + + - .. figure:: ../../../source/_static/publications/omnireset/peg_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Peg insertion success rate over wall clock time + + .. tab-item:: Rectangle on Wall + + .. note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. 
code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=rectangle + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 
10000 --headless \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. + + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + .. tab-item:: Grasped + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + .. tab-item:: Near Goal + + .. 
code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=rectangle env.scene.receptive_object=wall + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=rectangle \ + env.scene.receptive_object=wall + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=rectangle \ + env.scene.receptive_object=wall \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + .. warning:: + + This task has the least stable training. Some seeds plateau around 60%; if a run dies, reload from a checkpoint before the crash. You may need to try a few seeds (plot below is seed 0). + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/rectangle_success_rate_seeds.jpg + :width: 100% + :alt: Rectangle on wall success rate over steps + + - .. figure:: ../../../source/_static/publications/omnireset/rectangle_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Rectangle on wall success rate over wall clock time + + .. tab-item:: Cube Stacking + + .. 
note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=cube env.scene.receptive_object=cube + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=cube + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cube env.scene.receptive_object=cube \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cube env.scene.receptive_object=cube \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially 
Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cube env.scene.receptive_object=cube \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. + + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + .. tab-item:: Grasped + + .. 
code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + .. tab-item:: Near Goal + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=cube env.scene.receptive_object=cube + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=cube \ + env.scene.receptive_object=cube + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=cube \ + env.scene.receptive_object=cube \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/cube_success_rate_seeds.jpg + :width: 100% + :alt: Cube stacking success rate over steps + + - .. figure:: ../../../source/_static/publications/omnireset/cube_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Cube stacking success rate over wall clock time + + .. 
tab-item:: Cupcake on Plate + + .. note:: + + **Skip directly to Step 4** if you want to train an RL policy with our pre-generated reset state datasets. Only run Steps 1-3 if you want to generate your own. + + **Step 1: Collect Partial Assemblies** (~30 seconds) + + .. code:: bash + + python scripts_v2/tools/record_partial_assemblies.py --task OmniReset-PartialAssemblies-v0 --num_envs 10 --num_trajectories 10 --headless env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + **Step 2: Sample Grasp Poses** (~1 minute) + + .. code:: bash + + python scripts_v2/tools/record_grasps.py --task OmniReset-Robotiq2f85-GraspSampling-v0 --num_envs 8192 --num_grasps 1000 --headless env.scene.object=cupcake + + **Step 3: Generate Reset State Datasets** (~1 min to multiple hours depending on the reset and task) + + .. code:: bash + + # Object Anywhere, End-Effector Anywhere (Reaching) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + # Object Resting, End-Effector Grasped (Near Object) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectRestingEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate \ + env.events.reset_insertive_object_pose_from_reset_states.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Anywhere, End-Effector Grasped (Grasped) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate \ + 
env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + # Object Partially Assembled, End-Effector Grasped (Near Goal) + python scripts_v2/tools/record_reset_states.py \ + --task OmniReset-UR5eRobotiq2f85-ObjectPartiallyAssembledEEGrasped-v0 \ + --num_envs 4096 --num_reset_states 10000 --headless \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate \ + env.events.reset_insertive_object_pose_from_partial_assembly_dataset.params.dataset_dir=./Datasets/OmniReset \ + env.events.reset_end_effector_pose_from_grasp_dataset.params.dataset_dir=./Datasets/OmniReset + + **Step 3.5: Visualize Reset States (Optional)** + + Visualize the generated reset states to verify they are correct. By default all four reset distributions are loaded; use the tabs below to visualize one at a time. + + .. tab-set:: + + .. tab-item:: All + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + .. tab-item:: Reaching + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEAnywhere \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + .. tab-item:: Near Object + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectRestingEEGrasped \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + .. tab-item:: Grasped + + .. 
code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectAnywhereEEGrasped \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + .. tab-item:: Near Goal + + .. code:: bash + + python scripts_v2/tools/visualize_reset_states.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --num_envs 4 --dataset_dir ./Datasets/OmniReset \ + --reset_type ObjectPartiallyAssembledEEGrasped \ + env.scene.insertive_object=cupcake env.scene.receptive_object=plate + + **Step 4: Train RL Policy** + + Train with our pre-generated cloud datasets: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=cupcake \ + env.scene.receptive_object=plate + + Or, train with your locally generated datasets from Steps 1-3: + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + env.scene.insertive_object=cupcake \ + env.scene.receptive_object=plate \ + env.events.reset_from_reset_states.params.dataset_dir=./Datasets/OmniReset + + **Training Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/cupcake_success_rate_seeds.jpg + :width: 100% + :alt: Cupcake on plate success rate over steps + + - .. 
figure:: ../../../source/_static/publications/omnireset/cupcake_success_rate_seeds_walltime.jpg + :width: 100% + :alt: Cupcake on plate success rate over wall clock time + +---- + +Next Steps +^^^^^^^^^^ + +With a trained policy, you can: + +- **Go sim-to-real:** Continue to :doc:`sim2real` to finetune with system identification. +- **RGB student-teacher distillation:** See :doc:`distillation` for collecting RGB data with the state-based expert and training an RGB BC policy. diff --git a/docs/source/publications/omnireset/sim2real.rst b/docs/source/publications/omnireset/sim2real.rst new file mode 100644 index 00000000..9f2c7c0b --- /dev/null +++ b/docs/source/publications/omnireset/sim2real.rst @@ -0,0 +1,769 @@ +Sim2Real: SysID & RL Finetuning +================================ + +This guide bridges sim-to-real via system identification and policy finetuning. Finetuning uses a curriculum: sim dynamics shift toward your sys-id'd parameters (with higher OSC gains to compensate for friction, since policies do not train well under high friction from scratch), and action scale is reduced so the policy runs slower and transfers better to the real robot. + +Our system identification follows the `PACE `_ framework by Bjelonic et al. + +.. important:: + + **Prerequisite:** A trained RL policy from :doc:`rl_training`. + +Pipeline overview +----------------- + +1. **Robot setup** — UR5e/UR7e hardware config, robot calibration & USD, FK verification, metadata. Re-run reset state collection and RL training from :doc:`rl_training` (geometry-dependent). Install the diffusion_policy repo for real-robot control and sysid data collection. + +2. **System identification** — Collect chirp on real robot, run CMA-ES in UWLab, verify fit, write sysid params to metadata, teleop to verify. + +3. **Finetune** — Select best Stage-1 checkpoint, finetune with ADR, evaluate. Or use our pre-finetuned checkpoints (next section) if your setup matches ours. + +4. 
**Camera & hardware setup** — Mount cameras (D415/D435/D455), print task objects, calibrate camera extrinsics. + +5. **Next** — :doc:`distillation` for vision policy training and real-robot deployment. + +---- + +Robot Setup +----------- + +.. _installing-diffusion-policy: + +Installing Diffusion Policy +---------------------------- + +You need this codebase to control the real UR5e/UR7e. Real-robot deployment uses a **separate** conda environment (``robodiff``) so it does not conflict with Isaac Sim / UWLab. On Ubuntu, install RealSense SDK dependencies first if you will use cameras; see the `Diffusion Policy README `_ (use the ``omnireset`` branch). + +Clone the repo as a sibling to UWLab (skip the clone if you already did this for :doc:`distillation`): + +.. code:: text + + / + UWLab/ + diffusion_policy/ + +.. code:: bash + + conda deactivate # exit env_uwlab if active + cd + git clone -b omnireset https://github.com/WEIRDLabUW/diffusion_policy.git + cd diffusion_policy + mamba env create -f conda_environment_real.yaml # or: conda env create -f conda_environment_real.yaml + conda activate robodiff_real + python -m pip install -e . + +UR5e/UR7e Setup +---------------------------- + +The real UR5e/UR7e must be configured for external control (network, security, External Control URCap, Robotiq 2F-85 URCap). For step-by-step instructions, see the diffusion policy repo's `README_ur5e.md `_. + +Robot Calibration & URDF +---------------------------- + +Every UR5e/UR7e differs from the nominal model in link lengths, joint angles, and zero offsets, which compounds to significant end-effector error. To fix this, generate a calibrated URDF from your robot's factory calibration and update the robot USD. + +**1. Extract calibration and generate calibrated URDF** + +Install ROS 2 and set up the UR robot driver following the `NVIDIA Isaac ROS Universal Robots setup guide `_. 
Run their calibration script to extract your robot's joint offsets into ``ur5e_calibration.yaml``, then generate a calibrated URDF: + +.. code:: bash + + source /opt/ros/rolling/setup.bash + + ros2 run xacro xacro \ + /opt/ros/rolling/share/ur_description/urdf/ur.urdf.xacro \ + ur_type:=ur5e \ + name:=ur5e \ + kinematics_params:=$HOME/ur5e_calibration.yaml \ + > /path/to/ur5e_calibrated.urdf + + sed 's|package://ur_description|/opt/ros/rolling/share/ur_description|g' \ + /path/to/ur5e_calibrated.urdf \ + > /path/to/ur5e_calibrated_absolute.urdf + +**2. Update the robot USD** + +Download the existing calibrated robot USD from +`here `__ +and open it in Isaac Sim. Replace the UR5e/UR7e arm in the USD with the URDF of your newly calibrated UR5e/UR7e. After replacing the arm, relink the joint that attaches the gripper to the arm. This joint connection must be re-established in Isaac Sim for the gripper to remain properly attached. + +**3. Verify alignment** + +Collect (joint_pos, ee_pose) pairs from the simulator using IK-based workspace sampling, then verify that the analytical FK in the real-world codebase reproduces those poses (< 0.01 mm error per dimension): + +.. code:: bash + + # In UWLab (env_uwlab) + conda activate env_uwlab + cd /UWLab + python scripts_v2/tools/sim2real/collect_fk_pairs.py \ + --num_samples 4 --output /tmp/fk_pairs.npz --headless + +.. code:: bash + + # In diffusion_policy (robodiff_real) + conda activate robodiff_real + cd /diffusion_policy + python scripts/sim2real/test_fk_comparison.py --pairs /tmp/fk_pairs.npz + +This verifies that the real-world analytical FK (``ur5e_kinematics.py``) matches the physics engine's body transforms from the calibrated USD. A mismatch indicates a stale USD, wrong calibration constants, or a frame-convention bug. + +**4. Set up robot asset folder** + +Place the calibrated USD and a ``metadata.yaml`` side by side: + +.. 
code:: + + your_robot/ + ur5e_robotiq_gripper_d415_mount_safety_calibrated.usd + metadata.yaml + +Copy the base ``metadata.yaml`` from `here `__ and update the ``calibrated_joints`` (xyz/rpy) and ``link_inertials`` (masses/coms/inertias) sections with the values from your calibrated URDF. The ``sysid`` block will be filled in after :ref:`system identification ` below. + +**5. Recollect reset states & retrain** + +With the new USD, re-run reset state collection and RL training from :doc:`rl_training`. The reset datasets are geometry-dependent, so they must be regenerated whenever the USD changes. + +.. tip:: + + **Gripper sanity check.** Run a few open/close cycles on both the real and simulated gripper and compare the trajectories. If the real gripper is noticeably faster or slower than sim, tune the force and speed parameters in your real-world gripper config until the profiles roughly match. + +---- + +.. _sysid-section: + +Controller System Identification +--------------------------------- + +System identification calibrates simulation parameters (armature, friction, motor delay) to match your physical robot's dynamics. + +**1. Collect real-world data** + +Run a chirp (frequency-sweep) trajectory on the real UR5e/UR7e under the calibrated OSC controller and record joint positions and target poses at 500 Hz: + +.. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python scripts/sim2real/collect_sysid_data.py \ + --robot_ip 192.168.1.10 \ + --output /tmp/sysid_data_real.pt \ + --duration 8 --f0 0.1 --f1 3.0 + +**2. Run system identification** + +Use CMA-ES to optimize simulator dynamics parameters (armature, friction, motor delay) so the simulated trajectory matches the real one: + +.. code:: bash + + conda activate env_uwlab + cd /UWLab + python scripts_v2/tools/sim2real/sysid_ur5e_osc.py --headless \ + --num_envs 512 \ + --real_data /tmp/sysid_data_real.pt \ + --max_iter 200 + +**3. Verify the fit** + +Plot simulated vs. 
real joint trajectories using the best checkpoint: + +.. code:: bash + + python scripts_v2/tools/sim2real/plot_sysid_fit.py --headless \ + --checkpoint logs/sysid//checkpoint_0200.pt \ + --real_data /tmp/sysid_data_real.pt + +Inspect the overlay plots. A good fit should show close tracking across all joints (less than 2° RMSE per joint): + +.. image:: ../../_static/publications/omnireset/sysid_fit.jpg + :width: 100% + :alt: Sysid fit: sim vs real joint trajectories after CMA-ES optimization + +**4. Save parameters** + +Replace the ``sysid`` block in ``metadata.yaml`` (next to your robot USD) with the identified values for ``armature``, ``static_friction``, ``dynamic_ratio``, and ``viscous_friction``. These are loaded automatically during finetuning and evaluation. See the current calibrated robot's `metadata.yaml `_ for reference. + +**5. Teleop to verify motion** + +Teleop the real robot and confirm it moves sensibly (no stalling or sluggish tracking). If it stalls, increase OSC gains. For Mello setup (calibrate, stream, test connection), see the diffusion policy repo's `README_ur5e.md `_. From the diffusion_policy repo root: + +.. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python demo_real_robot.py -o --robot_ip + +To tune gains if the arm lags or stalls, add ``--osc_kp_pos`` and ``--osc_kp_rot``. + +---- + +Select Best Checkpoint & Finetune with ADR +-------------------------------------------- + +Either run the pipeline below or use our pre-finetuned checkpoints (next section) if your setup matches ours. Some policies transfer better than others. As an offline proxy, evaluate candidate checkpoints under action noise and pick the one with the highest success rate, then finetune it with `ADR (Automatic Domain Randomization) `__. Finetuning uses the identified sysid parameters as the center of a randomization range that ADR automatically expands, producing a policy robust to real-world variation. 
+ +ADR shifts the training distribution from zero friction, armature, and motor delay toward a randomization band around the sys-id'd values. OSC gains increase to compensate for higher friction. Action scale is reduced over the curriculum to slow the policy down for safer real-world transfer. + +Minimum env counts for stable finetuning: Peg 4096 (1 GPU), Leg 16384 per GPU (4 GPUs, 65536 total), Drawer 8192 (1 GPU). Download Stage 1 checkpoints from :doc:`index` and pass them via ``--resume_path``. + +All commands below run in the ``env_uwlab`` environment from the UWLab directory: + +.. code:: bash + + conda activate env_uwlab + cd /UWLab + +.. tab-set:: + + .. tab-item:: Peg Insertion + + **Select best checkpoint** + + .. code:: bash + + python scripts_v2/tools/sim2real/eval_robustness.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --checkpoints ckpt_seed1.pt ckpt_seed2.pt ckpt_seed3.pt \ + --action_noise 2.0 \ + --eval_steps 1000 \ + --num_envs 4096 \ + --headless \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + **Train** (4096 envs, 1 GPU) + + .. code:: bash + + python scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-v0 \ + --num_envs 4096 \ + --logger wandb \ + --headless \ + --resume_path \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + **Evaluate** + + .. code:: bash + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + **Finetuning Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/finetune_peg_curriculum_seeds.jpg + :width: 100% + :alt: Peg finetune curriculum over updates + + - .. 
figure:: ../../../source/_static/publications/omnireset/finetune_peg_curriculum_seeds_walltime.jpg + :width: 100% + :alt: Peg finetune curriculum over wall clock time + + .. tab-item:: Leg Twisting + + **Select best checkpoint** + + .. code:: bash + + python scripts_v2/tools/sim2real/eval_robustness.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --checkpoints ckpt_seed1.pt ckpt_seed2.pt ckpt_seed3.pt \ + --action_noise 2.0 \ + --eval_steps 1000 \ + --num_envs 4096 \ + --headless \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + **Train** (16384 envs per GPU × 4 GPUs) + + .. code:: bash + + python -m torch.distributed.run \ + --nnodes 1 \ + --nproc_per_node 4 \ + scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-v0 \ + --num_envs 16384 \ + --logger wandb \ + --headless \ + --distributed \ + --resume_path \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + **Evaluate** + + .. code:: bash + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + **Finetuning Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/finetune_leg_curriculum_seeds.jpg + :width: 100% + :alt: Leg finetune curriculum over updates + + - .. figure:: ../../../source/_static/publications/omnireset/finetune_leg_curriculum_seeds_walltime.jpg + :width: 100% + :alt: Leg finetune curriculum over wall clock time + + .. tab-item:: Drawer Assembly + + **Select best checkpoint** + + .. 
code:: bash + + python scripts_v2/tools/sim2real/eval_robustness.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0 \ + --checkpoints ckpt_seed1.pt ckpt_seed2.pt ckpt_seed3.pt \ + --action_noise 2.0 \ + --eval_steps 1000 \ + --num_envs 4096 \ + --headless \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + **Train** (8192 envs, 1 GPU) + + .. code:: bash + + python scripts/reinforcement_learning/rsl_rl/train.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-v0 \ + --num_envs 8192 \ + --logger wandb \ + --headless \ + --resume_path \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + **Evaluate** + + .. code:: bash + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + **Finetuning Curves** + + .. list-table:: + :widths: 50 50 + :class: borderless + + * - .. figure:: ../../../source/_static/publications/omnireset/finetune_drawer_curriculum_seeds.jpg + :width: 100% + :alt: Drawer finetune curriculum over updates + + - .. figure:: ../../../source/_static/publications/omnireset/finetune_drawer_curriculum_seeds_walltime.jpg + :width: 100% + :alt: Drawer finetune curriculum over wall clock time + +---- + +.. _use-finetuned-checkpoints: + +Use our finetuned checkpoints +----------------------------- + +Pre-finetuned for our robot calibration and sys-id'd parameters. If your setup is similar, you can download and run these instead of finetuning yourself. + +All commands below run in ``env_uwlab`` from the UWLab directory. + +.. tab-set:: + + .. tab-item:: Peg Insertion + + .. tab-set:: + + .. tab-item:: Seed 42 + + .. 
code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/peg_state_rl_expert_finetuned_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_finetuned_seed42.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Seed 0 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/peg_state_rl_expert_finetuned_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_finetuned_seed0.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/peg_state_rl_expert_finetuned_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint peg_state_rl_expert_finetuned_seed1.pt \ + env.scene.insertive_object=peg \ + env.scene.receptive_object=peghole + + .. tab-item:: Leg Twisting + + .. tab-set:: + + .. tab-item:: Seed 42 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/leg_state_rl_expert_finetuned_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_finetuned_seed42.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. 
tab-item:: Seed 0 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/leg_state_rl_expert_finetuned_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_finetuned_seed0.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/leg_state_rl_expert_finetuned_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint leg_state_rl_expert_finetuned_seed1.pt \ + env.scene.insertive_object=fbleg \ + env.scene.receptive_object=fbtabletop + + .. tab-item:: Drawer Assembly + + .. tab-set:: + + .. tab-item:: Seed 42 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/drawer_state_rl_expert_finetuned_seed42.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_finetuned_seed42.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Seed 0 + + .. 
code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/drawer_state_rl_expert_finetuned_seed0.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_finetuned_seed0.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + + .. tab-item:: Seed 1 + + .. code:: bash + + wget https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Policies/OmniReset/state_based_experts_finetuned/drawer_state_rl_expert_finetuned_seed1.pt + + python scripts/reinforcement_learning/rsl_rl/play.py \ + --task OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0 \ + --num_envs 1 \ + --checkpoint drawer_state_rl_expert_finetuned_seed1.pt \ + env.scene.insertive_object=fbdrawerbottom \ + env.scene.receptive_object=fbdrawerbox + +---- + +.. _camera-hardware-setup: + +Camera & Hardware Setup +----------------------- + +We use a **three-camera setup** with Intel RealSense depth cameras: + +* **Wrist camera** — D415 mounted on the Robotiq 2F-85 gripper via a 3D-printed bracket. +* **Two third-person cameras** — D435 and D455 on tripods, providing front and side views. + +Any combination of D415 / D435 / D455 works for any of the three viewpoints (the D455 has a wider baseline and higher depth quality, so prefer it when available). + +.. figure:: ../../../source/_static/publications/omnireset/camera_setup.jpg + :width: 80% + :alt: Three-camera setup with UR5e/UR7e, Robotiq gripper, and Intel RealSense cameras + + Example three-camera setup: D415 on wrist, D455 front, D435 side. + +**3D-printed assets** + +Download the STL files below and print them on any 3D printer. Higher infill (e.g. 80 %) helps the parts last longer under repeated contact; lower infill works but you may need to reprint more often. + +.. 
list-table:: + :header-rows: 1 + :widths: 40 60 + + * - Part + - Download + * - D415 wrist-camera mount (Robotiq 2F-85) + - `2f85_d415_mount.stl `__ + * - Peg insertion task + - `peg.stl `__, `peghole.stl `__ + * - Leg twisting task (based on `FurnitureBench `__) + - `fbleg.stl `__, `fbtabletop.stl `__ + * - Drawer assembly task (based on `FurnitureBench `__) + - `drawer_bottom.stl `__, `drawer_box.stl `__ + +---- + +.. _camera-calibration-section: + +Calibrate Cameras +----------------- + +Virtual cameras in simulation must match your real camera poses and intrinsics so the distilled policy transfers to real RGB observations. + +**Prerequisites** + +1. The calibration workflow switches between two environments: ``robodiff_real`` for real-robot scripts (Step 1) and ``env_uwlab`` for UWLab simulation scripts (Step 2). Set up ``robodiff_real`` in :ref:`Installing Diffusion Policy ` above. + +2. Print an ArUco marker — the calibration scripts use dictionary **6x6_50**, marker **ID 12**, printed at **150 mm**. Download the printable PDF: :download:`marker_6x6_150mm_id12.pdf <../../_static/publications/omnireset/marker_6x6_150mm_id12.pdf>`. + +3. Place the printed marker flat on the table near the robot base (see the :ref:`camera setup photo ` above for an example placement). Measure the offset (in meters) from the marker center to the robot base-frame origin and update ``aruco_offset`` in ``0_camera_calibrate.py``. If you place the marker in the same position as our setup photo, the default ``[0.24, 0.0, 0.0]`` should work. + +4. Record the robot's joint angles (in degrees) at the pose used for the reference image. You can read them from the UR teach pendant or from ``1_camera_get_rgb.py`` output. Pass them to ``align_cameras.py`` via ``--joint_angles`` so the simulated robot matches the real pose. + +.. 
tip:: + + **Wrist camera calibration:** Put the UR5e/UR7e into zero-gravity (freedrive) mode and manually position the arm so the wrist camera has a clear view of the ArUco marker. + +**Calibration workflow** + +Calibrate one camera at a time. For each camera: (a) run ArUco calibration and capture a reference RGB on the real robot, (b) convert extrinsics, (c) interactively align the sim camera to the real image, then (d) paste the result into the config. **Unplug all other cameras** while calibrating one. + +Real-world scripts live in the `diffusion_policy `_ repo (``omnireset`` branch) under ``scripts/sim2real/``. The interactive alignment tool lives in UWLab. + +.. tab-set:: + + .. tab-item:: Front Camera + + **Step 1 — Calibrate & capture (diffusion_policy, robodiff_real)** + + .. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python scripts/sim2real/0_camera_calibrate.py + python scripts/sim2real/1_camera_get_rgb.py + python scripts/sim2real/2_get_isaacsim_extrinsics.py + + Copy the ``pos``, ``rot``, and ``focal_length`` printed by ``2_get_isaacsim_extrinsics.py`` into the corresponding ``front_camera`` entry in ``camera_align_cfg.py`` as the initial guess for interactive alignment. + + **Step 2 — Interactive alignment (UWLab, env_uwlab)** + + .. code:: bash + + conda activate env_uwlab + cd /UWLab + python scripts_v2/tools/sim2real/align_cameras.py \ + --enable_cameras \ + --headless \ + --camera front_camera \ + --real_image /path/to/real_front.png \ + --joint_angles + + Replace ```` … ```` with the real robot's joint angles in degrees at the pose used when capturing the reference image. + + Press ``p`` to print the calibrated ``pos``, ``rot``, and ``focal_length``. + + .. figure:: ../../_static/publications/omnireset/example_blend_front_camera.png + :width: 80% + :align: center + :alt: Example blend after aligning the front camera + + Sample aligned sim-to-real blend (50% opacity). + + .. 
tab-item:: Side Camera + + **Step 1 — Calibrate & capture (diffusion_policy, robodiff_real)** + + .. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python scripts/sim2real/0_camera_calibrate.py + python scripts/sim2real/1_camera_get_rgb.py + python scripts/sim2real/2_get_isaacsim_extrinsics.py + + Copy the ``pos``, ``rot``, and ``focal_length`` printed by ``2_get_isaacsim_extrinsics.py`` into the corresponding ``side_camera`` entry in ``camera_align_cfg.py`` as the initial guess for interactive alignment. + + **Step 2 — Interactive alignment (UWLab, env_uwlab)** + + .. code:: bash + + conda activate env_uwlab + cd /UWLab + python scripts_v2/tools/sim2real/align_cameras.py \ + --enable_cameras \ + --headless \ + --camera side_camera \ + --real_image /path/to/real_side.png \ + --joint_angles + + Replace ```` … ```` with the real robot's joint angles in degrees. + + Press ``p`` to print the calibrated ``pos``, ``rot``, and ``focal_length``. + + .. figure:: ../../_static/publications/omnireset/example_blend_side_camera.png + :width: 80% + :align: center + :alt: Example blend after aligning the side camera + + Sample aligned sim-to-real blend (50% opacity). + + .. tab-item:: Wrist Camera + + **Step 1 — Calibrate & capture (diffusion_policy, robodiff_real)** + + .. code:: bash + + conda activate robodiff_real + cd /diffusion_policy + python scripts/sim2real/0_camera_calibrate.py + python scripts/sim2real/1_camera_get_rgb.py + python scripts/sim2real/2_get_isaacsim_extrinsics.py + + Copy the ``pos``, ``rot``, and ``focal_length`` printed by ``2_get_isaacsim_extrinsics.py`` into the corresponding ``wrist_camera`` entry in ``camera_align_cfg.py`` as the initial guess for interactive alignment. + + **Step 2 — Interactive alignment (UWLab, env_uwlab)** + + .. 
code:: bash + + conda activate env_uwlab + cd /UWLab + python scripts_v2/tools/sim2real/align_cameras.py \ + --enable_cameras \ + --headless \ + --camera wrist_camera \ + --real_image /path/to/real_wrist.png \ + --joint_angles + + Replace ```` … ```` with the real robot's joint angles in degrees. + + Press ``p`` to print the calibrated ``pos``, ``rot``, and ``focal_length``. + + .. figure:: ../../_static/publications/omnireset/example_blend_wrist_camera.png + :width: 80% + :align: center + :alt: Example blend after aligning the wrist camera + + Sample aligned sim-to-real blend (50% opacity). + +**Update config** + +After aligning each camera, paste the resulting ``pos``, ``rot``, and ``focal_length`` into ``data_collection_rgb_cfg.py``: + +.. code:: text + + source/uwlab_tasks/.../omnireset/config/ur5e_robotiq_2f85/data_collection_rgb_cfg.py + +Update the ``TiledCameraCfg`` entries (``front_camera``, ``side_camera``, ``wrist_camera``) with the calibrated values. Also update the corresponding ``base_position`` and ``base_rotation`` in the randomization events (``randomize_front_camera``, ``randomize_side_camera``, ``randomize_wrist_camera``) to match. + +With calibrated cameras, proceed to :doc:`distillation` to collect RGB demos, train a vision policy, evaluate in sim, and deploy on the real robot. + +---- + +Citations +--------- + +If you use the system identification pipeline, please cite PACE. If you use the ADR finetuning, please also cite OpenAI's ADR work. The leg-twisting and drawer-assembly tasks are based on FurnitureBench. For real-robot deployment and the diffusion_policy codebase, cite Diffusion Policy: + +.. 
code:: bibtex + + @misc{chi2024diffusionpolicyvisuomotorpolicy, + title={Diffusion Policy: Visuomotor Policy Learning via Action Diffusion}, + author={Cheng Chi and Zhenjia Xu and Siyuan Feng and Eric Cousineau and Yilun Du and Benjamin Burchfiel and Russ Tedrake and Shuran Song}, + year={2024}, + eprint={2303.04137}, + archivePrefix={arXiv}, + primaryClass={cs.RO}, + url={https://arxiv.org/abs/2303.04137}, + } + + @article{bjelonic2025towards, + title = {Towards Bridging the Gap: Systematic Sim-to-Real Transfer for Diverse Legged Robots}, + author = {Bjelonic, Filip and Tischhauser, Fabian and Hutter, Marco}, + journal = {arXiv preprint arXiv:2509.06342}, + year = {2025}, + eprint = {2509.06342}, + archivePrefix = {arXiv}, + primaryClass = {cs.RO}, + } + + @misc{heo2023furniturebenchreproduciblerealworldbenchmark, + title={FurnitureBench: Reproducible Real-World Benchmark for Long-Horizon Complex Manipulation}, + author={Minho Heo and Youngwoon Lee and Doohyun Lee and Joseph J. Lim}, + year={2023}, + eprint={2305.12821}, + archivePrefix={arXiv}, + primaryClass={cs.RO}, + url={https://arxiv.org/abs/2305.12821}, + } + + @article{akkaya2019solving, + title = {Solving Rubik's Cube with a Robot Hand}, + author = {Akkaya, Ilge and Andrychowicz, Marcin and Chociej, Maciek and Litwin, Mateusz and McGrew, Bob and Petron, Arthur and Paino, Alex and Plappert, Matthias and Powell, Glenn and Ribas, Raphael and others}, + journal = {arXiv preprint arXiv:1910.07113}, + year = {2019}, + eprint = {1910.07113}, + archivePrefix = {arXiv}, + primaryClass = {cs.LG}, + } diff --git a/environment.yml b/environment.yml index 5505274e..e4cd1de8 100644 --- a/environment.yml +++ b/environment.yml @@ -9,3 +9,5 @@ channels: dependencies: - python=3.11 - importlib_metadata + - pip: + - huggingface_hub diff --git a/scripts/reinforcement_learning/rsl_rl/cli_args.py b/scripts/reinforcement_learning/rsl_rl/cli_args.py index 2e3a546e..215003b5 100644 --- 
a/scripts/reinforcement_learning/rsl_rl/cli_args.py +++ b/scripts/reinforcement_learning/rsl_rl/cli_args.py @@ -6,6 +6,8 @@ from __future__ import annotations import argparse +import importlib.metadata as metadata +import inspect import random from typing import TYPE_CHECKING @@ -89,3 +91,31 @@ def update_rsl_rl_cfg(agent_cfg: RslRlBaseRunnerCfg, args_cli: argparse.Namespac agent_cfg.neptune_project = args_cli.log_project_name return agent_cfg + + +def sanitize_rsl_rl_cfg(agent_cfg: RslRlBaseRunnerCfg) -> RslRlBaseRunnerCfg: + """Make agent_cfg compatible with the installed rsl-rl version. + + Calls IsaacLab's deprecation handler, then drops any algorithm-config keys + that the installed algorithm class does not accept (e.g. ``optimizer`` and + ``share_cnn_encoders`` were added for rsl-rl >= 4.0 but are absent in 3.x). + """ + from isaaclab_rl.rsl_rl import handle_deprecated_rsl_rl_cfg + + installed_version = metadata.version("rsl-rl-lib") + agent_cfg = handle_deprecated_rsl_rl_cfg(agent_cfg, installed_version) + + # Resolve the actual algorithm class and drop unknown keys + alg_cfg = agent_cfg.algorithm + class_name = getattr(alg_cfg, "class_name", None) + if class_name is not None: + from rsl_rl import algorithms + + alg_class = getattr(algorithms, class_name, None) + if alg_class is not None: + accepted = set(inspect.signature(alg_class.__init__).parameters.keys()) + for key in list(vars(alg_cfg)): + if key != "class_name" and key not in accepted: + delattr(alg_cfg, key) + + return agent_cfg diff --git a/scripts/reinforcement_learning/rsl_rl/play.py b/scripts/reinforcement_learning/rsl_rl/play.py index 19b56892..611e5047 100644 --- a/scripts/reinforcement_learning/rsl_rl/play.py +++ b/scripts/reinforcement_learning/rsl_rl/play.py @@ -70,8 +70,9 @@ from isaaclab.utils.assets import retrieve_file_path from isaaclab.utils.dict import print_dict -from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper, export_policy_as_jit, export_policy_as_onnx +from 
isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint +from uwlab_rl.rsl_rl.exporter import export_policy_as_jit, export_policy_as_onnx import isaaclab_tasks # noqa: F401 import uwlab_tasks # noqa: F401 @@ -92,6 +93,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen agent_cfg: RslRlBaseRunnerCfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli) env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs + # make config compatible with installed rsl-rl version + agent_cfg = cli_args.sanitize_rsl_rl_cfg(agent_cfg) + # set the environment seed # note: certain randomizations occur in the environment initialization so we set the seed here env_cfg.seed = agent_cfg.seed diff --git a/scripts/reinforcement_learning/rsl_rl/train.py b/scripts/reinforcement_learning/rsl_rl/train.py index f0c2eb0a..bee6d5dc 100644 --- a/scripts/reinforcement_learning/rsl_rl/train.py +++ b/scripts/reinforcement_learning/rsl_rl/train.py @@ -31,6 +31,10 @@ "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes." ) parser.add_argument("--export_io_descriptors", action="store_true", default=False, help="Export IO descriptors.") +parser.add_argument( + "--resume_path", type=str, default=None, + help="Direct path to a checkpoint file to resume from (bypasses log directory search).", +) parser.add_argument( "--ray-proc-id", "-rid", type=int, default=None, help="Automatically configured by Ray integration, otherwise None." 
) @@ -97,6 +101,7 @@ import isaaclab_tasks # noqa: F401 import uwlab_tasks # noqa: F401 +from isaaclab.utils.assets import retrieve_file_path from isaaclab_tasks.utils import get_checkpoint_path from uwlab_tasks.utils.hydra import hydra_task_config @@ -121,6 +126,9 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg.max_iterations ) + # make config compatible with installed rsl-rl version + agent_cfg = cli_args.sanitize_rsl_rl_cfg(agent_cfg) + # set the environment seed # note: certain randomizations occur in the environment initialization so we set the seed here env_cfg.seed = agent_cfg.seed @@ -173,7 +181,10 @@ def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agen env = multi_agent_to_single_agent(env) # save resume path before creating a new log_dir - if agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation": + if args_cli.resume_path is not None: + resume_path = retrieve_file_path(args_cli.resume_path) + agent_cfg.resume = True + elif agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation": resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint) # wrap for video recording diff --git a/scripts_v2/tools/collect_demos.py b/scripts_v2/tools/collect_demos.py new file mode 100644 index 00000000..d1de6599 --- /dev/null +++ b/scripts_v2/tools/collect_demos.py @@ -0,0 +1,209 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Copyright (c) 2024-2025, The UW Lab Project Developers. +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Script to collect demonstrations from a trained RL policy.""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse +import contextlib +import gymnasium as gym +import os +import torch +from tqdm import tqdm + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Collect demonstrations from trained RL policy.") +parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") +parser.add_argument("--task", type=str, default=None, help="Name of the task.") +parser.add_argument("--dataset_file", type=str, default="./datasets/dataset.zarr", help="Output dataset path.") +parser.add_argument("--num_demos", type=int, default=10, help="Number of demonstrations to record.") +parser.add_argument( + "--deterministic", + action="store_true", + default=False, + help="Use the mean of the policy distribution instead of sampling.", +) + +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +args_cli, remaining_args = parser.parse_known_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import isaaclab_tasks # noqa: F401 +from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg +from isaaclab.managers.recorder_manager import DatasetExportMode + +# Import dataset handlers +from isaaclab.utils.datasets import HDF5DatasetFileHandler +from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper + +from uwlab.utils.datasets import ZarrDatasetFileHandler + +import uwlab_tasks # noqa: F401 +from uwlab_tasks.manager_based.manipulation.omnireset.mdp.recorders.recorders_cfg import ActionStateRecorderManagerCfg +from uwlab_tasks.utils.hydra import hydra_task_compose + +torch.backends.cuda.matmul.allow_tf32 = True +torch.backends.cudnn.allow_tf32 = True +torch.backends.cudnn.deterministic = False 
+torch.backends.cudnn.benchmark = False + + +def process_agent_cfg(env_cfg, agent_cfg): + if hasattr(agent_cfg.algorithm, "behavior_cloning_cfg"): + if agent_cfg.algorithm.behavior_cloning_cfg is None: + del agent_cfg.algorithm.behavior_cloning_cfg + else: + bc_cfg = agent_cfg.algorithm.behavior_cloning_cfg + if bc_cfg.experts_observation_group_cfg is not None: + import importlib + + # resolve path to the module location + mod_name, attr_name = bc_cfg.experts_observation_group_cfg.split(":") + mod = importlib.import_module(mod_name) + cfg_cls = mod + for attr in attr_name.split("."): + cfg_cls = getattr(cfg_cls, attr) + cfg = cfg_cls() + setattr(env_cfg.observations, "expert_obs", cfg) + + if hasattr(agent_cfg.algorithm, "offline_algorithm_cfg"): + if agent_cfg.algorithm.offline_algorithm_cfg is None: + del agent_cfg.algorithm.offline_algorithm_cfg + else: + if agent_cfg.algorithm.offline_algorithm_cfg.behavior_cloning_cfg is None: + del agent_cfg.algorithm.offline_algorithm_cfg.behavior_cloning_cfg + else: + bc_cfg = agent_cfg.algorithm.offline_algorithm_cfg.behavior_cloning_cfg + if bc_cfg.experts_observation_group_cfg is not None: + import importlib + + # resolve path to the module location + mod_name, attr_name = bc_cfg.experts_observation_group_cfg.split(":") + mod = importlib.import_module(mod_name) + cfg_cls = mod + for attr in attr_name.split("."): + cfg_cls = getattr(cfg_cls, attr) + cfg = cfg_cls() + setattr(env_cfg.observations, "expert_obs", cfg) + return agent_cfg + + +@hydra_task_compose(args_cli.task, "rsl_rl_cfg_entry_point", hydra_args=remaining_args) +def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg: RslRlOnPolicyRunnerCfg): + """Collect demonstrations from the environment using RSL-RL policy.""" + # get directory path and file name (without extension) from cli arguments + output_dir = os.path.dirname(args_cli.dataset_file) + output_file_name = os.path.basename(args_cli.dataset_file) + + # create directory if it does not exist + 
if not os.path.exists(output_dir): + os.makedirs(output_dir, exist_ok=True) + + # add recordermanager to save data + use_zarr_format = args_cli.dataset_file.endswith(".zarr") + if use_zarr_format: + dataset_handler = ZarrDatasetFileHandler + else: + dataset_handler = HDF5DatasetFileHandler + + # Setup recorder for raw actions + env_cfg.recorders = ActionStateRecorderManagerCfg() + + env_cfg.recorders.dataset_export_dir_path = output_dir + env_cfg.recorders.dataset_filename = output_file_name + env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_ONLY + env_cfg.recorders.dataset_file_handler_class_type = dataset_handler + + # override configurations with non-hydra CLI arguments + env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs + env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + env_cfg.seed = None + + # add expert obs into env_cfg + agent_cfg = process_agent_cfg(env_cfg, agent_cfg) + + # create isaac environment + env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array") + + # wrap around environment for rsl-rl + env = RslRlVecEnvWrapper(env) + + # load expert + bc = agent_cfg.algorithm.offline_algorithm_cfg.behavior_cloning_cfg + assert len(bc.experts_path) == 1, "Only one expert is supported for now." 
+ expert_obs_fn = bc.experts_observation_func + loader = bc.experts_loader + if not callable(loader): + loader = eval(loader) + expert_policy = loader(bc.experts_path[0]).to(env_cfg.sim.device) + expert_policy.eval() + + print(f"[Policy] {'Deterministic (mean)' if args_cli.deterministic else 'Stochastic (sampled)'} actions") + + # simulate environment -- run everything in inference mode + current_recorded_demo_count = 0 + with contextlib.suppress(KeyboardInterrupt), torch.inference_mode(): + # Initialize tqdm progress bar if num_demos > 0 + pbar = tqdm(total=args_cli.num_demos, desc="Recording Demonstrations", unit="demo") + + while True: + # agent stepping + expert_policy_obs = expert_obs_fn(env) + mean, std = expert_policy.compute_distribution(expert_policy_obs) + actions = mean if args_cli.deterministic else torch.normal(mean, std) + + # Mask actions to zero for environments in their first step after reset since first image may not be valid + first_step_mask = env.unwrapped.episode_length_buf == 0 + if torch.any(first_step_mask): + actions[first_step_mask, :-1] = 0.0 + actions[first_step_mask, -1] = -1.0 # close gripper + + # Inject expert distribution into obs_buf so recorder saves them alongside observations + env.unwrapped.obs_buf["data_collection"]["expert_action_mean"] = mean.clone() + env.unwrapped.obs_buf["data_collection"]["expert_action_std"] = std.clone() + + # env stepping + env.step(actions) + + # print out the current demo count if it has changed + new_count = env.unwrapped.recorder_manager.exported_successful_episode_count + if new_count > current_recorded_demo_count: + increment = new_count - current_recorded_demo_count + current_recorded_demo_count = new_count + pbar.update(increment) + + if args_cli.num_demos > 0 and new_count >= args_cli.num_demos: + print(f"All {args_cli.num_demos} demonstrations recorded. 
Exiting the app.") + break + + # check that simulation is stopped or not + if env.unwrapped.sim.is_stopped(): + break + + pbar.close() + + # close the simulator + env.close() + + +if __name__ == "__main__": + # run the main function - the decorator handles parameter passing + main() # type: ignore + # close sim app + simulation_app.close() diff --git a/scripts_v2/tools/compute_bottom_offset.py b/scripts_v2/tools/compute_bottom_offset.py new file mode 100644 index 00000000..554a6133 --- /dev/null +++ b/scripts_v2/tools/compute_bottom_offset.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Compute the bottom offset of a USD asset. + +The bottom offset is the distance from the asset's origin to the lowest +point of any mesh in the USD file. This value is used to spawn objects +flush on the table surface. + +Usage: + python scripts_v2/tools/compute_bottom_offset.py /path/to/asset.usd +""" + +import argparse +import numpy as np + +from pxr import Usd, UsdGeom + + +def get_world_points(mesh: UsdGeom.Mesh, time=Usd.TimeCode.Default()) -> np.ndarray: + """Get mesh vertices transformed to world space.""" + points = np.array(mesh.GetPointsAttr().Get(time), dtype=np.float64) + if len(points) == 0: + return points + xform = UsdGeom.Xformable(mesh.GetPrim()) + world_transform = xform.ComputeLocalToWorldTransform(time) + mat = np.array(world_transform, dtype=np.float64).T + ones = np.ones((len(points), 1), dtype=np.float64) + homogeneous = np.hstack([points, ones]) + return (homogeneous @ mat.T)[:, :3] + + +def compute_bottom_offset(usd_path: str, verbose: bool = False) -> float: + """Compute distance from origin to lowest mesh vertex in the USD.""" + stage = Usd.Stage.Open(usd_path) + if not stage: + raise FileNotFoundError(f"Cannot open USD: {usd_path}") + + all_points = [] + mesh_count = 0 + for prim in 
stage.Traverse(): + if prim.IsA(UsdGeom.Mesh): + mesh = UsdGeom.Mesh(prim) + pts = get_world_points(mesh) + if len(pts) > 0: + all_points.append(pts) + mesh_count += 1 + + if not all_points: + raise ValueError(f"No meshes found in {usd_path}") + + pts = np.vstack(all_points) + bbox_min = pts.min(axis=0) + bbox_max = pts.max(axis=0) + min_z = bbox_min[2] + + if verbose: + print(f"Meshes found: {mesh_count}") + print(f"Total vertices: {len(pts)}") + print(f"Bounding box min: ({bbox_min[0]:.6f}, {bbox_min[1]:.6f}, {bbox_min[2]:.6f})") + print(f"Bounding box max: ({bbox_max[0]:.6f}, {bbox_max[1]:.6f}, {bbox_max[2]:.6f})") + size = bbox_max - bbox_min + print(f"Size: ({size[0]:.6f}, {size[1]:.6f}, {size[2]:.6f})") + + return abs(min_z) + + +def main(): + parser = argparse.ArgumentParser(description="Compute bottom offset of a USD asset.") + parser.add_argument("usd_path", type=str, help="Path to the USD file.") + parser.add_argument("-v", "--verbose", action="store_true", help="Print bounding box details.") + args = parser.parse_args() + + offset = compute_bottom_offset(args.usd_path, verbose=args.verbose) + print(f"bottom_offset: {offset:.6f}") + + +if __name__ == "__main__": + main() diff --git a/scripts_v2/tools/eval_distilled_policy.py b/scripts_v2/tools/eval_distilled_policy.py new file mode 100644 index 00000000..62e2b7a8 --- /dev/null +++ b/scripts_v2/tools/eval_distilled_policy.py @@ -0,0 +1,261 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +"""Script to run a trained diffusion policy.""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse + +from isaaclab.app import AppLauncher + +# add argparse arguments +parser = argparse.ArgumentParser(description="Play policy trained using diffusion policy for Isaac Lab environments.") +parser.add_argument( + "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." +) +parser.add_argument("--task", type=str, default=None, help="Name of the task.") +parser.add_argument("--checkpoint", type=str, default=None, help="Path to diffusion policy checkpoint.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to run in parallel.") +parser.add_argument( + "--num_trajectories", + type=int, + default=100, + help="Number of trajectories to evaluate. If None, run until simulation is stopped.", +) +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility.") +parser.add_argument("--use_amp", action="store_true", default=False, help="Use automatic mixed precision.") +parser.add_argument("--save_video", action="store_true", default=False, help="Save video of the policy.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, remaining_args = parser.parse_known_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym +import numpy as np +import random +import torch +from contextlib import nullcontext +from tqdm import tqdm + +import dill +import hydra +import imageio +import isaaclab_tasks # noqa: F401 +from diffusion_policy.policy.base_image_policy import BaseImagePolicy + +# Diffusion policy imports +from diffusion_policy.workspace.base_workspace import BaseWorkspace +from isaaclab.envs import DirectRLEnvCfg, ManagerBasedRLEnvCfg + +# Import the 
Diffusion policy wrapper +from uwlab_rl.wrappers.diffusion import DiffusionPolicyWrapper + +import uwlab_tasks # noqa: F401 +from uwlab_tasks.utils.hydra import hydra_task_compose + + +def _set_seeds(seed: int): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +def _load_policy(ckpt_path: str, device: torch.device, use_ema: bool = False) -> BaseImagePolicy: + with open(ckpt_path, "rb") as f: + payload = torch.load(f, pickle_module=dill) + cfg = payload["cfg"] + cls = hydra.utils.get_class(cfg._target_) + workspace = cls(cfg) + workspace: BaseWorkspace + workspace.load_payload(payload, exclude_keys=None, include_keys=None) + policy = workspace.ema_model if cfg.training.use_ema else workspace.model + return policy.eval().to(device) + + +def _discover_cameras(obs_dict, env): + """Return (cam_keys, scene_cam_names) for video recording.""" + cam_keys = sorted(k for k in obs_dict["policy"] if "rgb" in k) + if cam_keys: + return cam_keys, [] + scene_cam_names = sorted( + name + for name, sensor in env.unwrapped.scene._sensors.items() + if hasattr(sensor, "data") and hasattr(sensor.data, "output") and "rgb" in sensor.data.output + ) + if scene_cam_names: + print(f"Using scene cameras for video: {scene_cam_names}") + return cam_keys, scene_cam_names + + +def _capture_frame(obs_dict, env, env_idx: int, cam_keys: list, scene_cam_names: list) -> np.ndarray | None: + """Capture and concatenate camera images for one environment.""" + imgs = [] + if cam_keys: + for cam in cam_keys: + img = obs_dict["policy"][cam][env_idx].detach().cpu().permute(1, 2, 0).numpy() + imgs.append((img * 255).clip(0, 255).astype("uint8")) + elif scene_cam_names: + for cam_name in scene_cam_names: + img = env.unwrapped.scene._sensors[cam_name].data.output["rgb"][env_idx].detach().cpu().numpy() + if img.shape[0] in [1, 3, 4] and img.shape[0] < img.shape[1]: + img 
= img.transpose(1, 2, 0) + if img.dtype != np.uint8: + img = (img * 255).clip(0, 255).astype("uint8") + if img.shape[-1] == 4: + img = img[..., :3] + imgs.append(img) + return np.concatenate(imgs, axis=1) if imgs else None + + +def _count_successes(env, reset_ids: torch.Tensor, term_names: list[str]) -> int: + count = 0 + term_dones = env.unwrapped.termination_manager._term_dones[reset_ids] + for term_row in term_dones: + active = term_row.nonzero(as_tuple=False).flatten().cpu().tolist() + if any(term_names[idx] == "success" for idx in active): + count += 1 + return count + + +def _collect_metrics(infos: dict, episode_metrics: dict): + if "log" not in infos: + return + for key, value in infos["log"].items(): + if key.startswith("Metrics/") or key.startswith("Episode_Reward/"): + episode_metrics.setdefault(key, []).append(value) + + +def _print_results(episodes: int, successful_episodes: int, episode_metrics: dict): + print("\nFinal Statistics:") + print(f"Total trajectories evaluated: {episodes}") + if successful_episodes > 0 or "Episode_Termination/success" in episode_metrics: + print(f"Successful trajectories: {successful_episodes}") + print(f"Success rate: {successful_episodes / episodes * 100:.2f}%") + else: + print("Success rate: Not calculable (success metric not found in environment)") + if episode_metrics: + print("\nAverage Metrics:") + for metric_name, values in sorted(episode_metrics.items()): + if values: + floats = [float(v) if isinstance(v, torch.Tensor) else v for v in values] + print(f"{metric_name}: {sum(floats) / len(floats):.4f}") + + +@hydra_task_compose(args_cli.task, "env_cfg_entry_point", hydra_args=remaining_args) +def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg, agent_cfg): + """Run a trained diffusion policy with Isaac Lab environment.""" + _set_seeds(args_cli.seed) + + device = torch.device(args_cli.device if args_cli.device else "cuda" if torch.cuda.is_available() else "cpu") + torch.backends.cudnn.benchmark = True + 
torch.backends.cuda.matmul.allow_tf32 = True + + env_cfg.scene.num_envs = args_cli.num_envs + env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + env_cfg.sim.use_fabric = not args_cli.disable_fabric + env_cfg.seed = args_cli.seed + env_cfg.observations.policy.concatenate_terms = False + + env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array") + + policy = _load_policy(args_cli.checkpoint, device) + wrapped_policy = DiffusionPolicyWrapper(policy, device, n_obs_steps=policy.n_obs_steps, num_envs=args_cli.num_envs) + + obs_dict, _ = env.reset() + dones = torch.ones(args_cli.num_envs, dtype=torch.bool, device=device) + wrapped_policy.reset((dones > 0).nonzero(as_tuple=False).reshape(-1)) + + term_names = env.unwrapped.termination_manager._term_names # type: ignore + assert "success" in term_names, "Success term not found in termination manager" + + episodes, steps, successful_episodes = 0, 0, 0 + episode_metrics: dict = {} + + pbar = None + if args_cli.num_trajectories is not None: + pbar = tqdm(total=args_cli.num_trajectories, desc="Evaluating trajectories (Success: 0.00%)") + + # Video recording state + cam_keys, scene_cam_names, env_frames, frames_to_save = [], [], [], [] + if args_cli.save_video: + cam_keys, scene_cam_names = _discover_cameras(obs_dict, env) + env_frames = [[] for _ in range(args_cli.num_envs)] + + while simulation_app.is_running(): + if args_cli.num_trajectories is not None and episodes >= args_cli.num_trajectories: + print(f"\nReached target number of trajectories ({args_cli.num_trajectories}). 
Stopping evaluation.") + break + + with torch.inference_mode(), torch.autocast(device_type=device.type) if args_cli.use_amp else nullcontext(): + actions = wrapped_policy.predict_action(obs_dict) + + if args_cli.save_video: + for i in range(args_cli.num_envs): + frame = _capture_frame(obs_dict, env, i, cam_keys, scene_cam_names) + if frame is not None: + env_frames[i].append(frame) + + step_result = env.step(actions) + if len(step_result) == 4: + obs_dict, rewards, dones, infos = step_result + else: + obs_dict, rewards, terminated, truncated, infos = step_result + dones = terminated | truncated + + steps += 1 + + if isinstance(dones, torch.Tensor): + new_ids = (dones > 0).nonzero(as_tuple=False) + episodes += len(new_ids) + elif dones: + new_ids = [0] + episodes += 1 + else: + new_ids = [] + + if isinstance(dones, torch.Tensor) and dones.any(): + reset_ids = (dones > 0).nonzero(as_tuple=False).reshape(-1) + successful_episodes += _count_successes(env, reset_ids, term_names) + wrapped_policy.reset(reset_ids) + _collect_metrics(infos, episode_metrics) + steps = 0 + + if args_cli.save_video: + for i in reset_ids: + frames_to_save.extend(env_frames[i]) + env_frames[i] = [] + imageio.mimsave("policy_cameras.mp4", frames_to_save, fps=10, codec="libx264") + + if pbar is not None: + pbar.update(len(new_ids)) + rate = (successful_episodes / episodes * 100) if episodes > 0 else 0.0 + pbar.set_description(f"Evaluating trajectories (Success: {rate:.2f}%)") + + _print_results(episodes, successful_episodes, episode_metrics) + if pbar is not None: + pbar.close() + env.close() + + +if __name__ == "__main__": + # run the main function - the decorator handles parameter passing + main() # type: ignore + # close sim app + simulation_app.close() diff --git a/scripts_v2/tools/record_grasps.py b/scripts_v2/tools/record_grasps.py index 526e2ad2..24b9e623 100644 --- a/scripts_v2/tools/record_grasps.py +++ b/scripts_v2/tools/record_grasps.py @@ -12,7 +12,6 @@ import argparse import os 
import time -import yaml from tqdm import tqdm from typing import cast @@ -22,7 +21,9 @@ parser = argparse.ArgumentParser(description="Grasp sampling for end effector on objects.") parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default="OmniReset-Robotiq2f85-GraspSampling-v0", help="Name of the task.") -parser.add_argument("--dataset_dir", type=str, default="./grasp_datasets/", help="Directory to save grasp results.") +parser.add_argument( + "--dataset_dir", type=str, default="./Datasets/OmniReset/", help="Root Datasets/OmniReset/ directory." +) parser.add_argument("--num_grasps", type=int, default=500, help="Number of grasp candidates to evaluate.") AppLauncher.add_app_launcher_args(parser) @@ -44,7 +45,7 @@ from uwlab.utils.datasets.torch_dataset_file_handler import TorchDatasetFileHandler import uwlab_tasks # noqa: F401 -import uwlab_tasks.manager_based.manipulation.reset_states.mdp as task_mdp +import uwlab_tasks.manager_based.manipulation.omnireset.mdp as task_mdp from uwlab_tasks.utils.hydra import hydra_task_compose torch.backends.cuda.matmul.allow_tf32 = True @@ -60,35 +61,24 @@ def main(env_cfg, agent_cfg) -> None: if not os.path.exists(args_cli.dataset_dir): os.makedirs(args_cli.dataset_dir, exist_ok=True) - # Get USD path for hash computation + # Derive object name for output path object_usd_path = env_cfg.scene.object.spawn.usd_path + obj_name = task_mdp.utils.object_name_from_usd(object_usd_path) + output_dir = os.path.join(args_cli.dataset_dir, "Grasps", obj_name) + os.makedirs(output_dir, exist_ok=True) - # Compute hash for this object - dataset_hash = task_mdp.utils.compute_assembly_hash(object_usd_path) - - # Update info.yaml with this hash and USD path - info_file = os.path.join(args_cli.dataset_dir, "info.yaml") - info_data = {} - if os.path.exists(info_file): - with open(info_file) as f: - info_data = yaml.safe_load(f) or {} - - info_data[dataset_hash] = 
{"object_usd_path": object_usd_path} - - with open(info_file, "w") as f: - yaml.dump(info_data, f, default_flow_style=False) - - print(f"Recording grasps for hash: {dataset_hash}") + print(f"Recording grasps for: {obj_name}") print(f"Object: {object_usd_path}") + print(f"Output: {output_dir}/grasps.pt") - # Configure recorder for hash-based saving + # Configure recorder env_cfg.recorders = task_mdp.GraspRelativePoseRecorderManagerCfg( robot_name="robot", object_name="object", gripper_body_name="robotiq_base_link", ) - env_cfg.recorders.dataset_export_dir_path = args_cli.dataset_dir - env_cfg.recorders.dataset_filename = f"{dataset_hash}.pt" + env_cfg.recorders.dataset_export_dir_path = output_dir + env_cfg.recorders.dataset_filename = "grasps.pt" env_cfg.recorders.dataset_export_mode = DatasetExportMode.EXPORT_SUCCEEDED_ONLY env_cfg.recorders.dataset_file_handler_class_type = TorchDatasetFileHandler diff --git a/scripts_v2/tools/record_partial_assemblies.py b/scripts_v2/tools/record_partial_assemblies.py index 68ddb063..66619341 100644 --- a/scripts_v2/tools/record_partial_assemblies.py +++ b/scripts_v2/tools/record_partial_assemblies.py @@ -12,7 +12,6 @@ import argparse import os import torch -import yaml from tqdm import tqdm from typing import cast @@ -23,7 +22,7 @@ parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default="UW-FBLeg-PartialAssemblies-v0", help="Name of the task.") parser.add_argument( - "--dataset_dir", type=str, default="./partial_assembly_datasets/", help="Directory to save assembly results." + "--dataset_dir", type=str, default="./Datasets/OmniReset/", help="Root Datasets/OmniReset/ directory." ) parser.add_argument( "--num_trajectories", type=int, default=1, help="Number of physics trajectories to run for pose discovery." 
@@ -49,7 +48,7 @@ from isaaclab.envs import ManagerBasedRLEnv import uwlab_tasks # noqa: F401 -from uwlab_tasks.manager_based.manipulation.reset_states.mdp.utils import compute_assembly_hash +from uwlab_tasks.manager_based.manipulation.omnireset.mdp.utils import compute_pair_dir from uwlab_tasks.utils.hydra import hydra_task_compose torch.backends.cuda.matmul.allow_tf32 = True @@ -75,29 +74,12 @@ def main(env_cfg, agent_cfg) -> None: # Create environment env = cast(ManagerBasedRLEnv, gym.make(args_cli.task, cfg=env_cfg)).unwrapped - # Get USD paths for hash computation + # Derive pair directory for output path insertive_usd_path = env_cfg.scene.insertive_object.spawn.usd_path receptive_usd_path = env_cfg.scene.receptive_object.spawn.usd_path + pair = compute_pair_dir(insertive_usd_path, receptive_usd_path) - # Compute hash for this object combination - dataset_hash = compute_assembly_hash(insertive_usd_path, receptive_usd_path) - - # Update info.yaml with this hash and USD paths - info_file = os.path.join(args_cli.dataset_dir, "info.yaml") - info_data = {} - if os.path.exists(info_file): - with open(info_file) as f: - info_data = yaml.safe_load(f) or {} - - info_data[dataset_hash] = { - "insertive_object_usd_path": insertive_usd_path, - "receptive_object_usd_path": receptive_usd_path, - } - - with open(info_file, "w") as f: - yaml.dump(info_data, f, default_flow_style=False) - - print(f"Recording partial assemblies for hash: {dataset_hash}") + print(f"Recording partial assemblies for: {pair}") print(f"Insertive: {insertive_usd_path}") print(f"Receptive: {receptive_usd_path}") @@ -187,7 +169,7 @@ def main(env_cfg, agent_cfg) -> None: # Save any remaining poses if recorded_poses: - _save_poses_to_dataset(recorded_poses, args_cli.dataset_dir, dataset_hash) + _save_poses_to_dataset(recorded_poses, args_cli.dataset_dir, pair) pbar.close() @@ -201,7 +183,7 @@ def main(env_cfg, agent_cfg) -> None: env.close() -def _save_poses_to_dataset(pose_batches: list, dataset_dir: 
str, dataset_hash: str) -> None: +def _save_poses_to_dataset(pose_batches: list, dataset_dir: str, pair_name: str) -> None: """Save pose batches to Torch dataset (.pt).""" if not pose_batches: return @@ -211,8 +193,9 @@ def _save_poses_to_dataset(pose_batches: list, dataset_dir: str, dataset_hash: s for key in pose_batches[0].keys(): all_poses[key] = torch.cat([batch[key] for batch in pose_batches], dim=0).cpu() - # Save as Torch .pt file - output_file = os.path.join(dataset_dir, f"{dataset_hash}.pt") + output_dir = os.path.join(dataset_dir, "Resets", pair_name) + os.makedirs(output_dir, exist_ok=True) + output_file = os.path.join(output_dir, "partial_assemblies.pt") torch.save(all_poses, output_file) print(f"Saved {len(all_poses['relative_position'])} poses to {output_file}") diff --git a/scripts_v2/tools/record_reset_states.py b/scripts_v2/tools/record_reset_states.py index 47d85e60..c656bba1 100644 --- a/scripts_v2/tools/record_reset_states.py +++ b/scripts_v2/tools/record_reset_states.py @@ -12,7 +12,6 @@ import argparse import os import torch -import yaml from tqdm import tqdm from typing import cast @@ -25,7 +24,13 @@ "--task", type=str, default="OmniReset-UR5eRobotiq2f85-ObjectAnywhereEEAnywhere-v0", help="Name of the task." ) parser.add_argument( - "--dataset_dir", type=str, default="./reset_state_datasets/", help="Directory to save reset state results." + "--dataset_dir", type=str, default="./Datasets/OmniReset/", help="Root Datasets/OmniReset/ directory." +) +parser.add_argument( + "--reset_type", + type=str, + default=None, + help="Reset type name (e.g. ObjectAnywhereEEAnywhere). Auto-inferred from --task if omitted.", ) parser.add_argument( "--num_reset_states", type=int, default=100, help="Number of reset states to record. Set to 0 for infinite." 
@@ -50,7 +55,7 @@ from uwlab.utils.datasets.torch_dataset_file_handler import TorchDatasetFileHandler import uwlab_tasks # noqa: F401 -import uwlab_tasks.manager_based.manipulation.reset_states.mdp as task_mdp +import uwlab_tasks.manager_based.manipulation.omnireset.mdp as task_mdp from uwlab_tasks.utils.hydra import hydra_task_compose torch.backends.cuda.matmul.allow_tf32 = True @@ -73,33 +78,34 @@ def main(env_cfg, agent_cfg) -> None: # make sure environment is non-deterministic for diverse pose discovery env_cfg.seed = None - # Get USD paths for hash computation + # Derive pair directory and reset type for output path insertive_usd_path = env_cfg.scene.insertive_object.spawn.usd_path receptive_usd_path = env_cfg.scene.receptive_object.spawn.usd_path - reset_state_hash = task_mdp.utils.compute_assembly_hash(insertive_usd_path, receptive_usd_path) - - # Update info.yaml with this hash and USD paths - info_file = os.path.join(args_cli.dataset_dir, "info.yaml") - info_data = {} - if os.path.exists(info_file): - with open(info_file) as f: - info_data = yaml.safe_load(f) or {} - - info_data[reset_state_hash] = { - "insertive_object_usd_path": insertive_usd_path, - "receptive_object_usd_path": receptive_usd_path, - } - - with open(info_file, "w") as f: - yaml.dump(info_data, f, default_flow_style=False) - - print(f"Recording reset states for hash: {reset_state_hash}") + pair = task_mdp.utils.compute_pair_dir(insertive_usd_path, receptive_usd_path) + + # Auto-infer reset_type from task name if not provided + reset_type = args_cli.reset_type + if reset_type is None: + for candidate in [ + "ObjectAnywhereEEAnywhere", + "ObjectRestingEEGrasped", + "ObjectAnywhereEEGrasped", + "ObjectPartiallyAssembledEEGrasped", + ]: + if candidate in args_cli.task: + reset_type = candidate + break + if reset_type is None: + raise ValueError(f"Could not infer reset_type from task '{args_cli.task}'. 
Pass --reset_type explicitly.") + + print(f"Recording reset states for: {pair} / {reset_type}") print(f"Insertive: {insertive_usd_path}") print(f"Receptive: {receptive_usd_path}") # Setup recording configuration - output_dir = args_cli.dataset_dir - output_file_name = f"{reset_state_hash}.pt" + output_dir = os.path.join(args_cli.dataset_dir, "Resets", pair) + os.makedirs(output_dir, exist_ok=True) + output_file_name = f"resets_{reset_type}.pt" env_cfg.recorders = task_mdp.StableStateRecorderManagerCfg() env_cfg.recorders.dataset_export_dir_path = output_dir diff --git a/scripts_v2/tools/sim2real/align_cameras.py b/scripts_v2/tools/sim2real/align_cameras.py new file mode 100644 index 00000000..269eb5a9 --- /dev/null +++ b/scripts_v2/tools/sim2real/align_cameras.py @@ -0,0 +1,376 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Interactive sim2real camera alignment for UR5e + Robotiq 2F-85. + +Renders the simulation camera view, blends it with a real reference image, +and lets you move/rotate the camera and adjust focal length with the keyboard. +Press 'p' to print the final (pos, rot, focal_length) values that you can +paste into data_collection_rgb_cfg.py. 
+ +Mirrors the sysid workflow: + scripts_v2/tools/sim2real/sysid_ur5e_osc.py → tunes physics params + scripts_v2/tools/sim2real/align_cameras.py → tunes camera poses + +Usage (front camera example): + python scripts_v2/tools/sim2real/align_cameras.py \ + --enable_cameras \ + --camera front_camera \ + --real_image /path/to/real_front.png \ + --joint_angles -12.0 -80.0 63.0 -30.6 -97.9 174.3 + +Usage (wrist camera example): + python scripts_v2/tools/sim2real/align_cameras.py \ + --enable_cameras \ + --camera wrist_camera \ + --real_image /path/to/real_wrist.png \ + --joint_angles -12.0 -80.0 63.0 -30.6 -97.9 174.3 + +Keyboard controls: + w/x move +/- X i/k pitch +/- + a/d move +/- Y j/l yaw +/- + up/down move +/- Z u/o roll +/- + left/right focal length -/+ + 1/2 blend ratio -/+ (0 = all sim, 1 = all real) + +/- position step size +/- + r reset camera to initial pose + p print camera params & save current view + q quit +""" + +import argparse +import numpy as np +import torch + +from isaaclab.app import AppLauncher + +parser = argparse.ArgumentParser(description="Sim2Real camera alignment tool.") +parser.add_argument( + "--camera", + type=str, + default="front_camera", + choices=["front_camera", "side_camera", "wrist_camera"], + help="Which camera to align", +) +parser.add_argument("--real_image", type=str, default=None, help="Path to reference real RGB image (png/jpg)") +parser.add_argument( + "--joint_angles", + type=float, + nargs=6, + default=[2.28, -95.58, 99.07, -93.36, -86.57, 4.33], + help="Arm joint angles in degrees (6 joints). 
Default matches real_env.py default init pose.", +) +parser.add_argument("--gripper_pos", type=float, default=1.0, help="Gripper position (0=closed, 1=open)") +parser.add_argument("--warmup_steps", type=int, default=30, help="Simulation warmup steps before interaction") +AppLauncher.add_app_launcher_args(parser) +args_cli = parser.parse_args() + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym # noqa: E402 +import matplotlib # noqa: E402 + +matplotlib.use("TkAgg") +import matplotlib.pyplot as plt # noqa: E402 + +from pxr import Gf, UsdGeom # noqa: E402 + +import uwlab_tasks # noqa: F401 +from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.camera_align_cfg import ( + CameraAlignEnvCfg, +) + +# ---- RGB key lookup ---- +CAMERA_TO_RGB = { + "front_camera": "front_rgb", + "side_camera": "side_rgb", + "wrist_camera": "wrist_rgb", +} + + +class CameraAligner: + """Interactive controller: keyboard → camera pose → blended view.""" + + def __init__(self, env, camera_key, real_img, fig, ax): + self.env = env + self.camera_key = camera_key + self.rgb_key = CAMERA_TO_RGB[camera_key] + self.fig = fig + self.ax = ax + self.real_img = real_img # (H, W, 3) float [0,1] + + self.camera = self.env.unwrapped.scene._sensors[camera_key] + + # Read initial LOCAL pose from the USD prim XformOps (offset relative to parent). + # We work in local space because USD XformOps are authoritative and survive + # the USD→Fabric sync that happens each sim step (unlike Fabric-only writes). 
+ prim = self.camera._sensor_prims[0] + xformable = UsdGeom.Xformable(prim) + self._xform_ops = {op.GetOpType(): op for op in xformable.GetOrderedXformOps()} + self.pos = np.array(self._xform_ops[UsdGeom.XformOp.TypeTranslate].Get(), dtype=np.float64) + quat = self._xform_ops[UsdGeom.XformOp.TypeOrient].Get() + self.rot = np.array([quat.GetReal(), *quat.GetImaginary()], dtype=np.float64) + + # Tuning step sizes + self.pos_step = 0.005 + self.rot_step = 0.005 + self.focal_step = 0.1 + self.blend = 0.5 + + # We'll store the most recent obs + self.obs = None + self.action = None + + # ---- quaternion ↔ euler helpers (OpenGL convention) ---- + @staticmethod + def quat_to_euler(q): + w, x, y, z = q + roll = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y)) + pitch = np.arcsin(np.clip(2 * (w * y - z * x), -1, 1)) + yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z)) + return np.array([roll, pitch, yaw]) + + @staticmethod + def euler_to_quat(e): + r, p, y = e / 2.0 + cr, cp, cy = np.cos(r), np.cos(p), np.cos(y) + sr, sp, sy = np.sin(r), np.sin(p), np.sin(y) + return np.array([ + cr * cp * cy + sr * sp * sy, + sr * cp * cy - cr * sp * sy, + cr * sp * cy + sr * cp * sy, + cr * cp * sy - sr * sp * cy, + ]) + + # ---- update sim ---- + def apply_camera_pose(self): + w, x, y, z = self.rot.tolist() + self._xform_ops[UsdGeom.XformOp.TypeTranslate].Set(Gf.Vec3d(*self.pos.tolist())) + self._xform_ops[UsdGeom.XformOp.TypeOrient].Set(Gf.Quatd(w, x, y, z)) + + def step_and_render(self): + self.obs, _, _, _, _ = self.env.step(self.action) + + # ---- visualize ---- + def update_view(self): + sim_rgb = self.obs["policy"][self.rgb_key] + # (1, C, H, W) or (1, H, W, C) – handle both + img = sim_rgb[0] + if img.shape[0] in (3, 4): + img = img.permute(1, 2, 0) + img = img.cpu().numpy().astype(np.float32) + if img.max() > 1.5: + img = img / 255.0 + + # Resize real to match sim if needed + real = self.real_img + if real is not None: + if real.shape[:2] != img.shape[:2]: + 
from PIL import Image + + real = ( + np.array(Image.fromarray((real * 255).astype(np.uint8)).resize((img.shape[1], img.shape[0]))) + / 255.0 + ) + blended = (1 - self.blend) * img[..., :3] + self.blend * real[..., :3] + else: + blended = img[..., :3] + + self.ax.clear() + self.ax.imshow(np.clip(blended, 0, 1)) + info = f"cam={self.camera_key} blend={self.blend:.2f} step={self.pos_step:.4f}" + self.ax.set_title(info, fontsize=9) + self.ax.axis("off") + self.fig.canvas.draw() + self.fig.canvas.flush_events() + + # ---- keyboard ---- + def on_key(self, event): + k = event.key + changed = True + + # --- position --- + if k == "w": + self.pos[0] += self.pos_step + elif k == "x": + self.pos[0] -= self.pos_step + elif k == "a": + self.pos[1] += self.pos_step + elif k == "d": + self.pos[1] -= self.pos_step + elif k == "up": + self.pos[2] += self.pos_step + elif k == "down": + self.pos[2] -= self.pos_step + + # --- rotation --- + elif k in ("i", "k", "j", "l", "u", "o"): + e = self.quat_to_euler(self.rot) + if k == "i": + e[1] += self.rot_step + elif k == "k": + e[1] -= self.rot_step + elif k == "j": + e[2] += self.rot_step + elif k == "l": + e[2] -= self.rot_step + elif k == "u": + e[0] += self.rot_step + elif k == "o": + e[0] -= self.rot_step + self.rot = self.euler_to_quat(e) + + # --- focal length --- + elif k in ("left", "right"): + prim = self.camera._sensor_prims[0] + fl = prim.GetFocalLengthAttr().Get() + fl += self.focal_step if k == "right" else -self.focal_step + prim.GetFocalLengthAttr().Set(fl) + print(f"focal_length={fl:.2f}") + + # --- blend --- + elif k == "1": + self.blend = max(0.0, self.blend - 0.1) + elif k == "2": + self.blend = min(1.0, self.blend + 0.1) + + # --- step size --- + elif k == "+": + self.pos_step *= 2.0 + print(f"pos_step={self.pos_step:.5f}") + elif k == "-": + self.pos_step /= 2.0 + print(f"pos_step={self.pos_step:.5f}") + + # --- reset --- + elif k == "r": + self.pos = np.array(self.camera.cfg.offset.pos, dtype=np.float64) + rot_cfg = 
self.camera.cfg.offset.rot + self.rot = np.array(rot_cfg, dtype=np.float64) + print("Reset to initial camera pose") + + # --- print / save --- + elif k == "p": + self._print_params() + changed = False + + # --- quit --- + elif k == "q": + plt.close(self.fig) + return + + else: + changed = False + + if changed: + self.apply_camera_pose() + self.step_and_render() + self.update_view() + + def _print_params(self): + prim = self.camera._sensor_prims[0] + fl = prim.GetFocalLengthAttr().Get() + euler = self.quat_to_euler(self.rot) + + print("\n" + "=" * 60) + print(f"Camera: {self.camera_key}") + print(f"Offset pos (x, y, z): ({self.pos[0]:.7f}, {self.pos[1]:.7f}, {self.pos[2]:.7f})") + print( + f"Quaternion (w, x, y, z): ({self.rot[0]:.8f}, {self.rot[1]:.8f}, {self.rot[2]:.8f}," + f" {self.rot[3]:.8f})" + ) + print(f"Euler (roll, pitch, yaw) rad: ({euler[0]:.6f}, {euler[1]:.6f}, {euler[2]:.6f})") + print(f"Focal length: {fl:.2f}") + print() + print("--- Paste into data_collection_rgb_cfg.py ---") + print("--- (same values for BOTH TiledCameraCfg.OffsetCfg AND BaseRGBEventCfg) ---") + print(f" pos=({self.pos[0]:.7f}, {self.pos[1]:.7f}, {self.pos[2]:.7f}),") + print(f" rot=({self.rot[0]:.8f}, {self.rot[1]:.8f}, {self.rot[2]:.8f}, {self.rot[3]:.8f}),") + print(f" focal_length={fl:.2f}") + print("=" * 60 + "\n") + + # save screenshot + sim_rgb = self.obs["policy"][self.rgb_key][0] + if sim_rgb.shape[0] in (3, 4): + sim_rgb = sim_rgb.permute(1, 2, 0) + img = sim_rgb.cpu().numpy() + if img.max() > 1.5: + img = (img / 255.0).clip(0, 1) + out_path = f"camera_align_{self.camera_key}.png" + plt.imsave(out_path, img[..., :3]) + print(f"Saved sim view to {out_path}") + + +def main(): + # Create the camera-alignment environment + env_cfg = CameraAlignEnvCfg() + + # Override default joint positions to match the real robot pose. + # joint_angles are in degrees; convert to radians for the init_state. 
+ joint_names = [ + "shoulder_pan_joint", + "shoulder_lift_joint", + "elbow_joint", + "wrist_1_joint", + "wrist_2_joint", + "wrist_3_joint", + ] + joint_rads = [float(np.deg2rad(a)) for a in args_cli.joint_angles] + for name, rad in zip(joint_names, joint_rads): + env_cfg.scene.robot.init_state.joint_pos[name] = rad + + env = gym.make("OmniReset-Ur5eRobotiq2f85-CameraAlign-v0", cfg=env_cfg) + device = env.unwrapped.device + + # Send zero OSC deltas so the robot holds the init joint config. + arm_dim = 6 + gripper_dim = 1 + action = torch.zeros(1, arm_dim + gripper_dim, device=device) + action[0, -1] = args_cli.gripper_pos # gripper open + + # Reset and warm up + print(f"Warming up simulation for {args_cli.warmup_steps} steps...") + obs, _ = env.reset() + + for _ in range(args_cli.warmup_steps): + obs, _, _, _, _ = env.step(action) + + # Load real reference image + real_img = None + if args_cli.real_image is not None: + real_img = plt.imread(args_cli.real_image)[..., :3].astype(np.float32) + if real_img.max() > 1.5: + real_img = real_img / 255.0 + print(f"Loaded reference image: {args_cli.real_image} shape={real_img.shape}") + else: + print("No --real_image provided; showing sim-only view. Press 1/2 to adjust blend ratio once you supply one.") + + # Set up matplotlib — clear default keybindings that conflict with controls + for key in plt.rcParams: + if key.startswith("keymap."): + plt.rcParams[key] = [] + fig, ax = plt.subplots(figsize=(8, 6)) + plt.ion() + + aligner = CameraAligner(env, args_cli.camera, real_img, fig, ax) + aligner.action = action + aligner.obs = obs + + fig.canvas.mpl_connect("key_press_event", aligner.on_key) + + aligner.update_view() + print("\nCamera alignment ready. 
Use keyboard to adjust (see --help for keys).") + plt.show(block=True) + + env.close() + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts_v2/tools/sim2real/collect_fk_pairs.py b/scripts_v2/tools/sim2real/collect_fk_pairs.py new file mode 100644 index 00000000..a7f8dcb7 --- /dev/null +++ b/scripts_v2/tools/sim2real/collect_fk_pairs.py @@ -0,0 +1,165 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Collect (joint_pos, ee_pose) pairs from the Isaac Lab physics engine. + +Uses IK-based workspace randomization (identical to training resets) to +sample diverse, reachable joint configurations. For each configuration the +physics-engine wrist_3_link pose (in the robot base frame) is recorded. + +The companion script (diffusion_policy/test_fk_comparison.py) then runs +our calibrated analytical FK on the same joint angles and compares, +verifying that the sim and real FK agree to < 0.01 mm per dimension. 
+ +Usage: + python scripts_v2/tools/sim2real/collect_fk_pairs.py \\ + --num_samples 50 --output fk_pairs.npz + + # more samples via larger parallel batch: + python scripts_v2/tools/sim2real/collect_fk_pairs.py \\ + --num_samples 200 --output fk_pairs.npz --settle_steps 100 +""" + +import argparse +import numpy as np + +from isaaclab.app import AppLauncher + +parser = argparse.ArgumentParser(description="Collect FK verification pairs from simulation.") +parser.add_argument( + "--num_samples", "-n", type=int, default=50, help="Number of IK-solved joint configurations to collect" +) +parser.add_argument("--output", "-o", type=str, default="fk_pairs.npz", help="Output npz file path") +parser.add_argument("--settle_steps", type=int, default=200, help="Physics steps to settle after each reset") +parser.add_argument( + "--num_resets", type=int, default=1, help="Number of env resets (total pairs = num_samples * num_resets)" +) +AppLauncher.add_app_launcher_args(parser) +args_cli = parser.parse_args() + +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym # noqa: E402 +import torch # noqa: E402 + +import isaaclab.utils.math as math_utils # noqa: E402 +from isaaclab.managers import EventTermCfg as EventTerm # noqa: E402 +from isaaclab.managers import SceneEntityCfg # noqa: E402 +from isaaclab.utils import configclass # noqa: E402 + +import uwlab_tasks # noqa: F401, E402 +from uwlab_tasks.manager_based.manipulation.omnireset import mdp as task_mdp # noqa: E402 +from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.sysid_cfg import ( # noqa: E402 + SysidEnvCfg, +) + + +@configclass +class FkPairsEventCfg: + """Reset events: IK-based EE workspace randomization matching training.""" + + reset_everything = EventTerm( + func=task_mdp.reset_scene_to_default, + mode="reset", + params={}, + ) + + reset_end_effector_pose = EventTerm( + 
func=task_mdp.reset_end_effector_round_fixed_asset, + mode="reset", + params={ + "fixed_asset_cfg": SceneEntityCfg("robot"), + "fixed_asset_offset": None, + "pose_range_b": { + "x": (0.3, 0.7), + "y": (-0.4, 0.4), + "z": (0.0, 0.5), + "roll": (0.0, 0.0), + "pitch": (np.pi / 4, 3 * np.pi / 4), + "yaw": (np.pi / 2, 3 * np.pi / 2), + }, + "robot_ik_cfg": SceneEntityCfg( + "robot", + joint_names=["shoulder.*", "elbow.*", "wrist.*"], + body_names="robotiq_base_link", + ), + }, + ) + + +def main(): + env_cfg = SysidEnvCfg() + env_cfg.scene.num_envs = args_cli.num_samples + env_cfg.events = FkPairsEventCfg() + + env = gym.make("OmniReset-Ur5eRobotiq2f85-Sysid-v0", cfg=env_cfg) + device = env.unwrapped.device + + robot = env.unwrapped.scene["robot"] + ee_idx = robot.body_names.index("wrist_3_link") + + arm_dim = 6 + gripper_dim = 1 + zero_action = torch.zeros(args_cli.num_samples, arm_dim + gripper_dim, device=device) + + all_joint_pos = [] + all_ee_pos = [] + all_ee_quat = [] + all_ee_aa = [] + + for r in range(args_cli.num_resets): + obs, _ = env.reset() + + for _ in range(args_cli.settle_steps): + obs, _, _, _, _ = env.step(zero_action) + + # Read all envs at once + joint_pos = robot.data.joint_pos[:, :6].cpu().numpy() # (N, 6) + ee_pos_w = robot.data.body_link_pos_w[:, ee_idx] # (N, 3) + ee_quat_w = robot.data.body_link_quat_w[:, ee_idx] # (N, 4) + ee_pos_b, ee_quat_b = math_utils.subtract_frame_transforms( + robot.data.root_pos_w, + robot.data.root_quat_w, + ee_pos_w, + ee_quat_w, + ) + ee_aa_b = math_utils.axis_angle_from_quat(ee_quat_b) + + all_joint_pos.append(joint_pos) + all_ee_pos.append(ee_pos_b.cpu().numpy()) + all_ee_quat.append(ee_quat_b.cpu().numpy()) + all_ee_aa.append(ee_aa_b.cpu().numpy()) + + print(f" Reset {r+1}/{args_cli.num_resets}: collected {len(joint_pos)} pairs") + + all_joint_pos = np.concatenate(all_joint_pos, axis=0) + all_ee_pos = np.concatenate(all_ee_pos, axis=0) + all_ee_quat = np.concatenate(all_ee_quat, axis=0) + all_ee_aa = 
np.concatenate(all_ee_aa, axis=0) + + np.savez( + args_cli.output, + joint_pos=all_joint_pos, + ee_pos=all_ee_pos, + ee_quat=all_ee_quat, + ee_rot_aa=all_ee_aa, + ) + + n = len(all_joint_pos) + print(f"\nSaved {n} pairs to {args_cli.output}") + print(f" joint_pos : {all_joint_pos.shape}") + print(f" ee_pos : {all_ee_pos.shape} (meters, robot base frame)") + print(f" ee_quat : {all_ee_quat.shape} (w,x,y,z)") + print(f" ee_rot_aa : {all_ee_aa.shape} (axis-angle, radians)") + + env.close() + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts_v2/tools/sim2real/eval_robustness.py b/scripts_v2/tools/sim2real/eval_robustness.py new file mode 100644 index 00000000..4eec03da --- /dev/null +++ b/scripts_v2/tools/sim2real/eval_robustness.py @@ -0,0 +1,176 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Evaluate checkpoint robustness under action noise for distillation selection. + +All policies run simultaneously in the same env, each controlling a disjoint +slice of environments. This ensures identical resets/randomization and makes +results independent of checkpoint ordering. 
+""" + +"""Launch Isaac Sim Simulator first.""" + +import argparse +import os +import sys + +sys.path.append( + os.path.join(os.path.dirname(__file__), "..", "..", "..", "scripts", "reinforcement_learning", "rsl_rl") +) + +from isaaclab.app import AppLauncher + +import cli_args # isort: skip + +parser = argparse.ArgumentParser(description="Evaluate checkpoints for distillation robustness.") +parser.add_argument("--task", type=str, required=True, help="Name of the task.") +parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate (split evenly).") +parser.add_argument("--checkpoints", nargs="+", required=True, help="List of checkpoint paths to evaluate.") +parser.add_argument("--eval_steps", type=int, default=1000, help="Number of env steps to run.") +parser.add_argument("--action_noise", type=float, default=2.0, help="Std of Gaussian noise added to actions.") +parser.add_argument( + "--agent", type=str, default="rsl_rl_cfg_entry_point", help="Name of the RL agent configuration entry point." +) +parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") +parser.add_argument( + "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations." 
+) +cli_args.add_rsl_rl_args(parser) +AppLauncher.add_app_launcher_args(parser) +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import gymnasium as gym +import torch + +import isaaclab_tasks # noqa: F401 +from isaaclab.envs import ( + DirectMARLEnv, + DirectMARLEnvCfg, + DirectRLEnvCfg, + ManagerBasedRLEnvCfg, + multi_agent_to_single_agent, +) +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.utils.assets import retrieve_file_path +from isaaclab_rl.rsl_rl import RslRlBaseRunnerCfg, RslRlVecEnvWrapper +from rsl_rl.runners import DistillationRunner, OnPolicyRunner + +import uwlab_tasks # noqa: F401 +from uwlab_tasks.manager_based.manipulation.omnireset import mdp as task_mdp +from uwlab_tasks.utils.hydra import hydra_task_config + + +@hydra_task_config(args_cli.task, args_cli.agent) +def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlBaseRunnerCfg): + """Evaluate checkpoints under action noise and rank by success throughput.""" + agent_cfg: RslRlBaseRunnerCfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli) + agent_cfg = cli_args.sanitize_rsl_rl_cfg(agent_cfg) + env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs + env_cfg.seed = agent_cfg.seed + env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device + + num_policies = len(args_cli.checkpoints) + num_envs = env_cfg.scene.num_envs + envs_per_policy = num_envs // num_policies + slices = [] + for i in range(num_policies): + start = i * envs_per_policy + end = (i + 1) * envs_per_policy if i < num_policies - 1 else num_envs + slices.append((start, end)) + + env_cfg.terminations.success = DoneTerm( + func=task_mdp.consecutive_success_state_with_min_length, + params={"num_consecutive_successes": 5, 
"min_episode_length": 10}, + ) + + env = gym.make(args_cli.task, cfg=env_cfg) + if isinstance(env.unwrapped, DirectMARLEnv): + env = multi_agent_to_single_agent(env) + env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions) + + term_names = env.unwrapped.termination_manager._term_names + assert "success" in term_names, f"'success' not in termination terms: {term_names}" + success_idx = term_names.index("success") + + policies = [] + policy_nns = [] + for ckpt_path in args_cli.checkpoints: + resume_path = retrieve_file_path(ckpt_path) + if agent_cfg.class_name == "OnPolicyRunner": + runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + elif agent_cfg.class_name == "DistillationRunner": + runner = DistillationRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device) + else: + raise ValueError(f"Unsupported runner class: {agent_cfg.class_name}") + runner.load(resume_path) + policies.append(runner.get_inference_policy(device=env.unwrapped.device)) + try: + policy_nns.append(runner.alg.policy) + except AttributeError: + policy_nns.append(runner.alg.actor_critic) + + print(f"\n{'=' * 60}") + print(f"Running {num_policies} policies across {num_envs} envs") + print(f"Action noise std: {args_cli.action_noise}") + print(f"Eval steps: {args_cli.eval_steps}") + for i, ckpt in enumerate(args_cli.checkpoints): + s, e = slices[i] + print(f" Policy {i}: envs [{s}:{e}] ({e - s} envs) <- {os.path.basename(ckpt)}") + print(f"{'=' * 60}") + + total_successes = [0] * num_policies + total_episodes = [0] * num_policies + + obs = env.get_observations() + + for step in range(args_cli.eval_steps): + with torch.inference_mode(): + action_slices = [] + for i, policy in enumerate(policies): + s, e = slices[i] + action_slices.append(policy(obs)[s:e]) + actions = torch.cat(action_slices, dim=0) + actions = actions + args_cli.action_noise * torch.randn_like(actions) + obs, _, dones, extras = env.step(actions) + for pnn in policy_nns: + 
pnn.reset(dones) + + if dones.any(): + reset_ids = (dones > 0).nonzero(as_tuple=False).reshape(-1) + term_dones = env.unwrapped.termination_manager._term_dones[reset_ids] + + for env_id, term_row in zip(reset_ids, term_dones): + eid = env_id.item() + pidx = next(i for i, (s, e) in enumerate(slices) if s <= eid < e) + total_episodes[pidx] += 1 + active = term_row.nonzero(as_tuple=False).flatten().cpu().tolist() + if success_idx in active: + total_successes[pidx] += 1 + + print(f"\n{'=' * 60}") + print(f"RANKING BY THROUGHPUT (action_noise={args_cli.action_noise}, steps={args_cli.eval_steps})") + print(f"{'=' * 60}") + ranking = [] + for i, ckpt in enumerate(args_cli.checkpoints): + rate = total_successes[i] / total_episodes[i] if total_episodes[i] > 0 else 0.0 + ranking.append((ckpt, total_successes[i], total_episodes[i], rate)) + ranking.sort(key=lambda x: x[1], reverse=True) + for rank, (ckpt, succ, eps, rate) in enumerate(ranking, 1): + print(f" #{rank}: {os.path.basename(ckpt)}") + print(f" successes={succ} episodes={eps} rate={rate:.1%}") + + env.close() + + +if __name__ == "__main__": + main() + simulation_app.close() diff --git a/scripts_v2/tools/sim2real/plot_sysid_fit.py b/scripts_v2/tools/sim2real/plot_sysid_fit.py new file mode 100644 index 00000000..8605fc8b --- /dev/null +++ b/scripts_v2/tools/sim2real/plot_sysid_fit.py @@ -0,0 +1,361 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +""" +Visualize system identification fit by replaying real waypoints through +the manager-based Sysid env (same RelCartesianOSCAction as sysid_ur5e_osc.py). + +Loads a CMA-ES checkpoint, applies best params to the sim, runs closed-loop +replay, and plots sim vs. real joint trajectories. 
+ +Usage: + python scripts_v2/tools/sim2real/plot_sysid_fit.py --headless \ + --checkpoint logs/sysid/YYYYMMDD_HHMMSS/checkpoint_0200.pt \ + --real_data sysid_data_real.pt +""" + +import argparse +import matplotlib +import numpy as np +import os +import torch + +matplotlib.use("Agg") +import gymnasium as gym +import matplotlib.pyplot as plt + +from isaaclab.app import AppLauncher + +parser = argparse.ArgumentParser(description="Plot sysid fit") +parser.add_argument("--checkpoint", type=str, required=True, help="Path to sysid checkpoint .pt") +parser.add_argument("--real_data", type=str, required=True, help="Path to real data .pt") +parser.add_argument("--max_steps", type=int, default=None) + +AppLauncher.add_app_launcher_args(parser) +args_cli = parser.parse_args() + +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +from isaaclab.actuators import DelayedPDActuatorCfg +from isaaclab.assets import Articulation +from isaaclab.utils.math import subtract_frame_transforms + +from uwlab_assets.robots.ur5e_robotiq_gripper.kinematics import ARM_JOINT_NAMES, EE_BODY_NAME, NUM_ARM_JOINTS + +import uwlab_tasks # noqa: F401 # register gym envs +from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.sysid_cfg import SysidEnvCfg +from uwlab_tasks.manager_based.manipulation.omnireset.mdp.utils import settle_robot, target_pose_to_action + +# ============================================================================ +# Parameter application (same as sysid_ur5e_osc.py) +# ============================================================================ + + +def apply_params(robot, params, arm_joint_ids, num_joints, device): + """Apply 25-element param vector (single env) to robot.""" + N = 1 + env_ids = torch.arange(N, device=device) + p = torch.tensor(params, device=device, dtype=torch.float32).unsqueeze(0) + + armature_full = torch.zeros(N, num_joints, device=device) + static_friction_full = torch.zeros(N, num_joints, device=device) + 
    # --- tail of apply_params() (its `def` line lies above this chunk) ---
    # Layout of the 25-vector `p` (batch dim N first): [0:6] armature,
    # [6:12] static friction, [12:18] dynamic/static ratio, [18:24] viscous
    # friction, [24] motor delay in physics steps.
    dynamic_friction_full = torch.zeros(N, num_joints, device=device)
    viscous_friction_full = torch.zeros(N, num_joints, device=device)
    armature_full[:, arm_joint_ids] = p[:, 0:6]
    static_fric = p[:, 6:12]
    dynamic_ratio = p[:, 12:18]
    static_friction_full[:, arm_joint_ids] = static_fric
    # Dynamic friction is parameterized as a ratio of static friction so the
    # optimizer cannot produce dynamic > static.
    dynamic_friction_full[:, arm_joint_ids] = dynamic_ratio * static_fric
    viscous_friction_full[:, arm_joint_ids] = p[:, 18:24]
    robot.write_joint_armature_to_sim(armature_full, env_ids=env_ids)
    robot.write_joint_friction_coefficient_to_sim(
        static_friction_full,
        joint_dynamic_friction_coeff=dynamic_friction_full,
        joint_viscous_friction_coeff=viscous_friction_full,
        env_ids=env_ids,
    )

    # Single-env variant: one shared delay taken from the first row of `p`,
    # applied to position/velocity/effort command buffers alike.
    delay_int = int(round(float(p[0, 24])))
    arm_actuator = robot.actuators["arm"]
    delay_tensor = torch.tensor([delay_int], device=device, dtype=torch.int)
    arm_actuator.positions_delay_buffer.set_time_lag(delay_tensor)
    arm_actuator.velocities_delay_buffer.set_time_lag(delay_tensor)
    arm_actuator.efforts_delay_buffer.set_time_lag(delay_tensor)


# ============================================================================
# Closed-loop replay
# ============================================================================


def closed_loop_replay(
    env,
    wp_step_indices,
    wp_target_pos,
    wp_target_quat,
    initial_joint_pos,
    arm_joint_ids,
    ee_frame_idx,
    sim_dt,
    T_steps,
    headless=True,
):
    """Run closed-loop replay using env's RelCartesianOSC.

    Replays the recorded waypoint sequence through the environment's own
    action pipeline (same controller as RL), one env, for ``T_steps`` steps.

    Args:
        env: gym environment wrapping the manager-based sysid env.
        wp_step_indices: per-waypoint activation step indices (monotonic).
        wp_target_pos: per-waypoint target positions, base frame.
        wp_target_quat: per-waypoint target orientations, base frame.
        initial_joint_pos: arm joint positions to settle to before replay.
        arm_joint_ids: indices of the six arm joints in the articulation.
        ee_frame_idx: body index of the end-effector frame.
        sim_dt: physics step size (seconds).
        T_steps: number of replay steps.
        headless: forwarded to settle_robot (rendering toggle).

    Returns sim trajectory dict."""
    unwrapped = env.unwrapped
    robot = unwrapped.scene["robot"]
    sim = unwrapped.sim
    device = unwrapped.device
    action_dim = unwrapped.action_manager.total_action_dim
    W = wp_step_indices.shape[0]

    # Settle to the recorded initial pose before stepping so the replay and
    # the real trajectory start from the same configuration.
    default_joint_pos = robot.data.default_joint_pos.clone()
    default_joint_vel = robot.data.default_joint_vel.clone()
    default_joint_pos[:, arm_joint_ids] = initial_joint_pos.unsqueeze(0)
    default_joint_vel[:] = 0.0
    env.reset()
    settle_robot(robot, sim, default_joint_pos, default_joint_vel, arm_joint_ids, sim_dt, headless=headless)

    sim_positions, sim_velocities, sim_ee_positions = [], [], []
    wp_idx = 0

    for t in range(T_steps):
        # Advance the active waypoint; while-loop skips waypoints whose
        # activation step has already passed (handles sparse logging).
        while wp_idx + 1 < W and t >= wp_step_indices[wp_idx + 1]:
            wp_idx += 1

        # Current EE pose expressed in the robot base frame.
        ee_pos_w = robot.data.body_pos_w[:, ee_frame_idx]
        ee_quat_w = robot.data.body_quat_w[:, ee_frame_idx]
        ee_pos_b, ee_quat_b = subtract_frame_transforms(
            robot.data.root_pos_w, robot.data.root_quat_w, ee_pos_w, ee_quat_w
        )
        target_pos = wp_target_pos[wp_idx].unsqueeze(0)
        target_quat = wp_target_quat[wp_idx].unsqueeze(0)

        # First 6 action dims drive the arm (relative Cartesian command);
        # remaining dims (presumably the gripper — confirm with action cfg)
        # are held at zero.
        action_arm = target_pose_to_action(ee_pos_b, ee_quat_b, target_pos, target_quat)
        action = torch.cat([action_arm, torch.zeros(1, action_dim - 6, device=device)], dim=-1)
        env.step(action)

        joint_pos = robot.data.joint_pos[:, arm_joint_ids]
        joint_vel = robot.data.joint_vel[:, arm_joint_ids]
        sim_positions.append(joint_pos[0].cpu().numpy().copy())
        sim_velocities.append(joint_vel[0].cpu().numpy().copy())
        sim_ee_positions.append(ee_pos_b[0].cpu().numpy().copy())

        # Progress print roughly every 5% of the replay.
        if (t + 1) % max(1, T_steps // 20) == 0:
            print(f" step {t+1}/{T_steps} ({100*(t+1)/T_steps:.0f}%)")

    return {
        "joint_positions": np.array(sim_positions),
        "joint_velocities": np.array(sim_velocities),
        "ee_positions": np.array(sim_ee_positions),
    }


# ============================================================================
# Plotting
# ============================================================================

# Short display names for the six UR5e arm joints (plot titles/tables).
JOINT_NAMES_SHORT = ["Shoulder Pan", "Shoulder Lift", "Elbow", "Wrist 1", "Wrist 2", "Wrist 3"]


def plot_overlay(real_joints, sim_joints, dt, save_path="sysid_fit.png"):
    """Plot sim vs real joint positions.

    Args:
        real_joints: real trajectory, radians, shape (T, 6).
        sim_joints: simulated trajectory, radians, same shape.
        dt: sample period in seconds (x-axis scale).
        save_path: output PNG path.
    """
    T = real_joints.shape[0]
    time_axis = np.arange(T) * dt

    # 3x2 grid — one panel per arm joint, shared time axis.
    fig, axes = plt.subplots(3, 2, figsize=(16, 10), sharex=True)
    axes = axes.flatten()

    for j in range(NUM_ARM_JOINTS):
        ax = axes[j]
        ax.plot(time_axis, np.degrees(real_joints[:, j]), "b-", linewidth=1.0, label="Real", alpha=0.8)
        ax.plot(time_axis, np.degrees(sim_joints[:, j]), "r-", linewidth=1.0, label="Sim", alpha=0.8)
        ax.set_title(JOINT_NAMES_SHORT[j], fontsize=11)
        ax.set_ylabel("deg")
        ax.legend(loc="upper right", fontsize=8)
        ax.grid(True, alpha=0.3)

    # Only the bottom row gets x labels (axes share x).
    axes[-2].set_xlabel("Time (s)")
    axes[-1].set_xlabel("Time (s)")
    fig.suptitle("Sysid Fit: Sim vs Real Joint Trajectories", fontsize=13)
    fig.tight_layout()
    fig.savefig(save_path, dpi=150)
    print(f"Saved overlay plot: {save_path}")
    plt.close(fig)


def plot_error(real_joints, sim_joints, dt, save_path="sysid_fit_error.png"):
    """Plot per-joint error over time.

    Error is sim minus real, converted to degrees; each panel's title shows
    that joint's RMSE.
    """
    T = real_joints.shape[0]
    time_axis = np.arange(T) * dt
    error_deg = np.degrees(sim_joints - real_joints)

    fig, axes = plt.subplots(3, 2, figsize=(16, 10), sharex=True)
    axes = axes.flatten()

    for j in range(NUM_ARM_JOINTS):
        ax = axes[j]
        ax.plot(time_axis, error_deg[:, j], "k-", linewidth=0.8, alpha=0.7)
        ax.axhline(y=0, color="gray", linestyle="--", alpha=0.5)
        rmse_j = np.sqrt(np.mean(error_deg[:, j] ** 2))
        ax.set_title(f"{JOINT_NAMES_SHORT[j]} (RMSE={rmse_j:.2f}°)", fontsize=11)
        ax.set_ylabel("Error (deg)")
        ax.grid(True, alpha=0.3)

    axes[-2].set_xlabel("Time (s)")
    axes[-1].set_xlabel("Time (s)")
    fig.suptitle("Sysid Fit: Per-Joint Error", fontsize=13)
    fig.tight_layout()
    fig.savefig(save_path, dpi=150)
    print(f"Saved error plot: {save_path}")
    plt.close(fig)


# ============================================================================
# Main
# ============================================================================


def main():
    # args_cli is parsed at module import time (before AppLauncher boot).
    args = args_cli
    device_str = args.device

    # Load checkpoint produced by the CMA-ES sysid run.
    # NOTE(review): weights_only=False unpickles arbitrary objects — only load
    # trusted checkpoints.
    print(f"\nLoading checkpoint: {args.checkpoint}")
    ckpt = torch.load(args.checkpoint, map_location="cpu", weights_only=False)
    best_params = ckpt["best_params"]
    best_score = ckpt["best_score"]
    ckpt_args = ckpt.get("args", {})
    # Score is mean per-step sum-of-squared joint error (rad^2); sqrt gives an
    # RMSE-like value converted to degrees for display.
    print(f" Score (MSE): {best_score:.6f} RMSE: {np.degrees(np.sqrt(best_score)):.4f}°")
    print(f" Checkpoint args: sim_dt={ckpt_args.get('sim_dt', 'N/A')}")

    # Print best params (same 25-slot layout as apply_params).
    arm = best_params[:6]
    sfric = best_params[6:12]
    dratio = best_params[12:18]
    vfric = best_params[18:24]
    delay = round(float(best_params[24]))
    print(f"\n {'Joint':<20s} {'Armature':>10s} {'SFric':>10s} {'DRatio':>10s} {'VFric':>10s}")
    for i, name in enumerate(JOINT_NAMES_SHORT):
        print(f" {name:<20s} {arm[i]:10.4f} {sfric[i]:10.4f} {dratio[i]:10.4f} {vfric[i]:10.4f}")
    print(f" Motor delay: {delay} steps")

    # Load the recorded real-robot trajectory and its waypoint schedule.
    print(f"\nLoading real data: {args.real_data}")
    real_data = torch.load(args.real_data, map_location="cpu", weights_only=False)
    real_joint_pos = real_data["joint_positions"]
    initial_joint_pos = real_data["initial_joint_pos"]
    wp_step_indices = real_data["waypoint_step_indices"]
    wp_target_pos = real_data["waypoint_target_pos"]
    wp_target_quat = real_data["waypoint_target_quat"]
    dt = real_data["dt"]

    T_steps = real_joint_pos.shape[0]
    if args.max_steps is not None:
        T_steps = min(T_steps, args.max_steps)

    print(f" {T_steps} steps ({T_steps*dt:.2f}s), dt={dt*1000:.1f}ms")

    # Move to GPU (real trajectory stays on CPU as numpy for metrics/plots).
    real_joint_pos_np = real_joint_pos[:T_steps].numpy()
    initial_joint_pos_dev = initial_joint_pos.to(device_str).float()
    wp_step_indices = wp_step_indices.to(device_str).long()
    wp_target_pos = wp_target_pos.to(device_str).float()
    wp_target_quat = wp_target_quat.to(device_str).float()

    # Create env (same as sysid_ur5e_osc.py) — single env for replay.
    env_cfg = SysidEnvCfg()
    env_cfg.scene.num_envs = 1
    env_cfg.scene.env_spacing = 2.0
    # Delay buffer must be at least as deep as the fitted delay; 5 matches the
    # default search range of the sysid script.
    delay_max = max(delay, 5)
    # Per-joint effort limits (Nm) — large joints vs wrist joints.
    _effort_lim = {
        "shoulder_pan_joint": 150.0,
        "shoulder_lift_joint": 150.0,
        "elbow_joint": 150.0,
        "wrist_1_joint": 28.0,
        "wrist_2_joint": 28.0,
        "wrist_3_joint": 28.0,
    }
    # Per-joint velocity limits (rad/s): pi/2 for large joints, pi for wrists.
    _vel_lim = {
        "shoulder_pan_joint": 1.5708,
        "shoulder_lift_joint": 1.5708,
        "elbow_joint": 1.5708,
        "wrist_1_joint": 3.1415,
        "wrist_2_joint": 3.1415,
        "wrist_3_joint": 3.1415,
    }
    # Swap in a delayed PD actuator so the fitted motor delay can be applied.
    # Zero stiffness/damping: the OSC action computes torques directly.
    env_cfg.scene.robot.actuators["arm"] = DelayedPDActuatorCfg(
        joint_names_expr=["shoulder.*", "elbow.*", "wrist.*"],
        stiffness=0.0,
        damping=0.0,
        effort_limit=_effort_lim,
        velocity_limit=_vel_lim,
        min_delay=0,
        max_delay=delay_max,
    )
    env = gym.make("OmniReset-Ur5eRobotiq2f85-Sysid-v0", cfg=env_cfg)
    env.reset()

    unwrapped = env.unwrapped
    robot: Articulation = unwrapped.scene["robot"]
    device = unwrapped.device
    arm_joint_ids = robot.find_joints(ARM_JOINT_NAMES)[0]
    ee_frame_idx = robot.find_bodies(EE_BODY_NAME)[0][0]
    num_joints = robot.num_joints
    sim_dt = env_cfg.sim.dt

    # Apply best params (writes joint dynamics + actuator delay to sim).
    print(f"\nApplying best params (delay={delay})...")
    apply_params(robot, best_params, arm_joint_ids, num_joints, device)

    # Run closed-loop replay of the recorded waypoints.
    print(f"\nRunning closed-loop replay ({T_steps} steps)...")
    result = closed_loop_replay(
        env,
        wp_step_indices,
        wp_target_pos,
        wp_target_quat,
        initial_joint_pos_dev,
        arm_joint_ids,
        ee_frame_idx,
        sim_dt,
        T_steps,
        headless=args_cli.headless,
    )

    sim_joints = result["joint_positions"]
    real_joints = real_joint_pos_np

    # Compute per-joint RMSE (degrees) between sim replay and real data.
    error_deg = np.degrees(sim_joints - real_joints)
    print(f"\n{'='*60}")
    print("Per-joint RMSE (deg)")
    print("=" * 60)
    for j in range(NUM_ARM_JOINTS):
        rmse_j = np.sqrt(np.mean(error_deg[:, j] ** 2))
        print(f" {JOINT_NAMES_SHORT[j]:<16s}: {rmse_j:.4f}")

    rmse_total = np.sqrt(np.mean(error_deg**2))
    mae_total = np.mean(np.abs(error_deg))
    max_total = np.max(np.abs(error_deg))
    print(f" TOTAL : RMSE={rmse_total:.4f} MAE={mae_total:.4f} Max={max_total:.4f}")

    # Sysid-equivalent score for comparison with checkpoint: mean over steps
    # of the per-step SUM of squared joint errors (radians), matching the
    # objective minimized by the CMA-ES run.
    error_rad = sim_joints - real_joints
    sysid_score = np.mean(np.sum(error_rad**2, axis=1))
    sysid_rmse_deg = np.degrees(np.sqrt(sysid_score))
    print(f"\n Sysid-equivalent metric: score={sysid_score:.6f} RMSE={sysid_rmse_deg:.4f}°")
    print(f" Checkpoint metric: score={best_score:.6f} RMSE={np.degrees(np.sqrt(best_score)):.4f}°")
    print("=" * 60)

    # Plot overlay + error figures next to the checkpoint file.
    out_dir = os.path.dirname(args.checkpoint) if os.path.dirname(args.checkpoint) else "."
    plot_overlay(real_joints, sim_joints, dt, save_path=os.path.join(out_dir, "sysid_fit.png"))
    plot_error(real_joints, sim_joints, dt, save_path=os.path.join(out_dir, "sysid_fit_error.png"))


if __name__ == "__main__":
    main()
    simulation_app.close()
diff --git a/scripts_v2/tools/sim2real/sysid_ur5e_osc.py b/scripts_v2/tools/sim2real/sysid_ur5e_osc.py
new file mode 100644
index 00000000..a6e6813f
--- /dev/null
+++ b/scripts_v2/tools/sim2real/sysid_ur5e_osc.py
@@ -0,0 +1,367 @@
# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md).
# All Rights Reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""
System Identification for UR5e using CMA-ES (Closed-Loop Replay).

Uses the manager-based env (OmniReset-Ur5eRobotiq2f85-Sysid-v0) so the same
RelCartesianOSCAction as RL is used — no duplicate OSC. PACE-style integration.

Parameters (25 total): armature*6, static_friction*6, dynamic_ratio*6,
    viscous_friction*6, motor_delay*1.

Usage:
    python scripts_v2/tools/sim2real/sysid_ur5e_osc.py --headless --num_envs 512 \
        --real_data sysid_data_real.pt --max_iter 200
"""

import argparse
import gymnasium as gym
import numpy as np
import os
import time
import torch

from isaaclab.app import AppLauncher

# CLI must be parsed before the simulator app is launched (AppLauncher
# consumes its own arguments from the same parser).
parser = argparse.ArgumentParser(description="UR5e System Identification via CMA-ES")
parser.add_argument("--num_envs", type=int, default=512)
parser.add_argument("--real_data", type=str, required=True)
parser.add_argument("--max_iter", type=int, default=200)
parser.add_argument("--sigma", type=float, default=0.3)
parser.add_argument("--output_dir", type=str, default="logs/sysid")
parser.add_argument("--save_interval", type=int, default=5)
parser.add_argument("--max_steps", type=int, default=None)
# Parameter bounds
parser.add_argument("--armature_min", type=float, default=0.0)
parser.add_argument("--armature_max", type=float, default=10.0)
parser.add_argument("--friction_min", type=float, default=0.0)
parser.add_argument("--friction_max", type=float, default=20.0)
parser.add_argument("--viscous_friction_min", type=float, default=0.0)
parser.add_argument("--viscous_friction_max", type=float, default=20.0)
parser.add_argument(
    "--delay_max", type=int, default=5, help="Max motor delay in physics steps. CMA-ES searches [0, delay_max]."
)

AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# Boot the Omniverse app; all isaaclab/uwlab imports below require it.
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

from isaaclab.actuators import DelayedPDActuatorCfg
from isaaclab.assets import Articulation
from isaaclab.utils.math import subtract_frame_transforms

from uwlab_assets.robots.ur5e_robotiq_gripper.kinematics import ARM_JOINT_NAMES, EE_BODY_NAME, NUM_ARM_JOINTS

import uwlab_tasks  # noqa: F401 # register gym envs
from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.sysid_cfg import SysidEnvCfg
from uwlab_tasks.manager_based.manipulation.omnireset.mdp.utils import settle_robot, target_pose_to_action

# ============================================================================
# CMA-ES Optimizer
# ============================================================================


class CMAES:
    """Lightweight CMA-ES wrapper using the cmaes library.

    The optimizer works internally in a normalized [0, 1]^d box; ask()/tell()
    translate between that box and the physical bounds, so candidate scaling
    stays uniform across parameters with very different ranges.
    """

    def __init__(self, num_params, population_size, sigma=0.3, bounds=None):
        # Imported lazily so the module only requires `cmaes` when used.
        from cmaes import CMA

        self.num_params = num_params
        self.population_size = population_size
        # Physical bounds, shape (num_params, 2): [:, 0]=lo, [:, 1]=hi.
        self.bounds = np.array(bounds)
        self.optimizer = CMA(
            mean=np.full(num_params, 0.5),  # start at the center of the box
            sigma=sigma,
            population_size=population_size,
            bounds=np.column_stack([np.zeros(num_params), np.ones(num_params)]),
        )
        # Normalized solutions from the last ask(), needed again by tell().
        self._solutions = None

    def ask(self) -> np.ndarray:
        """Sample one population; returns candidates in PHYSICAL units, shape (pop, d)."""
        self._solutions = []
        for _ in range(self.population_size):
            self._solutions.append(self.optimizer.ask())
        normalized = np.array(self._solutions)
        return self.bounds[:, 0] + normalized * (self.bounds[:, 1] - self.bounds[:, 0])

    def tell(self, scores: np.ndarray):
        """Report fitness values (lower is better) for the last ask() batch."""
        self.optimizer.tell(list(zip(self._solutions, scores.tolist())))

    @property
    def best_params(self) -> np.ndarray:
        """Current distribution mean, denormalized to physical units.

        NOTE(review): reads the private ``_mean`` attribute of cmaes.CMA —
        confirm it survives library upgrades.
        """
        mean_normalized = self.optimizer._mean
        return self.bounds[:, 0] + mean_normalized * (self.bounds[:, 1] - self.bounds[:, 0])


# ============================================================================
# Parameter Mapping
# ============================================================================


def build_bounds(args):
    """25 params: [armature*6, static_friction*6, dynamic_ratio*6, viscous_friction*6, delay*1].

    Returns a list of [lo, hi] pairs, one per parameter, in the order above.
    """
    bounds = []
    for _ in range(NUM_ARM_JOINTS):
        bounds.append([args.armature_min, args.armature_max])
    for _ in range(NUM_ARM_JOINTS):
        bounds.append([args.friction_min, args.friction_max])
    for _ in range(NUM_ARM_JOINTS):
        bounds.append([0.0, 1.0])  # dynamic_ratio
    for _ in range(NUM_ARM_JOINTS):
        bounds.append([args.viscous_friction_min, args.viscous_friction_max])
    bounds.append([0.0, float(args.delay_max)])  # motor_delay
    return bounds


def apply_params_to_envs(robot, params_tensor, arm_joint_ids, num_joints, device):
    """Apply 25 params to all envs (joint dynamics + per-env motor delay).

    Args:
        robot: articulation to write to.
        params_tensor: candidate batch, shape (N, 25), one row per env.
        arm_joint_ids: indices of the six arm joints.
        num_joints: total joint count of the articulation.
        device: torch device of the sim buffers.
    """
    N = params_tensor.shape[0]
    env_ids = torch.arange(N, device=device)

    # Non-arm joints keep zeros; only the six arm joints are optimized.
    armature_full = torch.zeros(N, num_joints, device=device)
    static_friction_full = torch.zeros(N, num_joints, device=device)
    dynamic_friction_full = torch.zeros(N, num_joints, device=device)
    viscous_friction_full = torch.zeros(N, num_joints, device=device)
    armature_full[:, arm_joint_ids] = params_tensor[:, 0:6]
    static_fric = params_tensor[:, 6:12]
    dynamic_ratio = params_tensor[:, 12:18]
    static_friction_full[:, arm_joint_ids] = static_fric
    # Ratio parameterization guarantees dynamic <= static friction.
    dynamic_friction_full[:, arm_joint_ids] = dynamic_ratio * static_fric
    viscous_friction_full[:, arm_joint_ids] = params_tensor[:, 18:24]
    robot.write_joint_armature_to_sim(armature_full, env_ids=env_ids)
    robot.write_joint_friction_coefficient_to_sim(
        static_friction_full,
        joint_dynamic_friction_coeff=dynamic_friction_full,
        joint_viscous_friction_coeff=viscous_friction_full,
        env_ids=env_ids,
    )

    # Motor delay: continuous -> round to int, set per-env on actuator buffers
    delay_int = torch.round(params_tensor[:, 24]).clamp(min=0).to(torch.int)
    arm_actuator = robot.actuators["arm"]
    arm_actuator.positions_delay_buffer.set_time_lag(delay_int)
    arm_actuator.velocities_delay_buffer.set_time_lag(delay_int)
    arm_actuator.efforts_delay_buffer.set_time_lag(delay_int)


# ============================================================================
# Main
# ============================================================================


def main():
    args = args_cli
    device_str = args.device
    N = args.num_envs
    num_params = NUM_ARM_JOINTS * 4 + 1  # 25

    print("\n" + "=" * 60)
    print("UR5e System Identification - CMA-ES (Closed-Loop Replay)")
    print("=" * 60)
    print(f"Envs: {N} Params: {num_params} Iters: {args.max_iter} Sigma: {args.sigma}")
    print("Controller: env's RelCartesianOSC (same as RL)")
    print(f"Motor delay: optimized [0, {args.delay_max}] steps")

    # Load real data (recorded trajectory + waypoint schedule).
    # NOTE(review): weights_only=False — only load trusted files.
    print(f"\nLoading: {args.real_data}")
    real_data = torch.load(args.real_data, map_location="cpu", weights_only=False)
    real_joint_pos = real_data["joint_positions"]
    initial_joint_pos = real_data["initial_joint_pos"]
    wp_step_indices = real_data["waypoint_step_indices"]
    wp_target_pos = real_data["waypoint_target_pos"]
    wp_target_quat = real_data["waypoint_target_quat"]
    dt = real_data["dt"]

    T_steps = real_joint_pos.shape[0]
    if args.max_steps is not None:
        T_steps = min(T_steps, args.max_steps)
    W = wp_step_indices.shape[0]

    print(f" {T_steps} steps ({T_steps*dt:.2f}s), {W} waypoints, dt={dt*1000:.1f}ms")

    # Move to GPU
    real_joint_pos = real_joint_pos[:T_steps].to(device_str).float()
    initial_joint_pos_dev = initial_joint_pos.to(device_str).float()
    wp_step_indices = wp_step_indices.to(device_str).long()
    wp_target_pos = wp_target_pos.to(device_str).float()
    wp_target_quat = wp_target_quat.to(device_str).float()

    # Manager-based env (same RelCartesianOSC as RL); one env per candidate.
    env_cfg = SysidEnvCfg()
    env_cfg.scene.num_envs = N
    env_cfg.scene.env_spacing = 2.0
    # Per-joint effort limits (Nm): large joints vs wrist joints.
    _effort_lim = {
        "shoulder_pan_joint": 150.0,
        "shoulder_lift_joint": 150.0,
        "elbow_joint": 150.0,
        "wrist_1_joint": 28.0,
        "wrist_2_joint": 28.0,
        "wrist_3_joint": 28.0,
    }
    # Per-joint velocity limits (rad/s): pi/2 for large joints, pi for wrists.
    _vel_lim = {
        "shoulder_pan_joint": 1.5708,
        "shoulder_lift_joint": 1.5708,
        "elbow_joint": 1.5708,
        "wrist_1_joint": 3.1415,
        "wrist_2_joint": 3.1415,
        "wrist_3_joint": 3.1415,
    }
    # Delayed PD actuator so per-env motor delay is part of the search space.
    # Zero PD gains: torques come from the OSC action, not the actuator.
    env_cfg.scene.robot.actuators["arm"] = DelayedPDActuatorCfg(
        joint_names_expr=["shoulder.*", "elbow.*", "wrist.*"],
        stiffness=0.0,
        damping=0.0,
        effort_limit=_effort_lim,
        velocity_limit=_vel_lim,
        min_delay=0,
        max_delay=args.delay_max,
    )
    env = gym.make("OmniReset-Ur5eRobotiq2f85-Sysid-v0", cfg=env_cfg)
    env.reset()

    unwrapped = env.unwrapped
    robot: Articulation = unwrapped.scene["robot"]
    sim = unwrapped.sim
    device = unwrapped.device
    arm_joint_ids = robot.find_joints(ARM_JOINT_NAMES)[0]
    ee_frame_idx = robot.find_bodies(EE_BODY_NAME)[0][0]
    num_joints = robot.num_joints
    sim_dt = env_cfg.sim.dt
    action_dim = unwrapped.action_manager.total_action_dim  # 7 (arm 6 + gripper 1)

    # All envs start from the same recorded initial arm pose, zero velocity.
    default_joint_pos = robot.data.default_joint_pos.clone()
    default_joint_vel = robot.data.default_joint_vel.clone()
    default_joint_pos[:, arm_joint_ids] = initial_joint_pos_dev.unsqueeze(0).expand(N, -1)
    default_joint_vel[:] = 0.0

    # Population size == num_envs: one candidate evaluated per env per iter.
    bounds = build_bounds(args)
    cmaes = CMAES(num_params=num_params, population_size=N, sigma=args.sigma, bounds=bounds)

    print(
        f"\nBounds: armature[{args.armature_min},{args.armature_max}] "
        f"friction[{args.friction_min},{args.friction_max}] "
        f"dyn_ratio[0,1] viscous[{args.viscous_friction_min},{args.viscous_friction_max}] "
        f"delay[0,{args.delay_max}]"
    )

    timestamp = time.strftime("%Y%m%d_%H%M%S")
    output_dir = os.path.join(args.output_dir, timestamp)
    os.makedirs(output_dir, exist_ok=True)
    print(f"Output: {output_dir}\n")

    best_score_ever = float("inf")
    best_params_ever = None
    history = []

    for iteration in range(args.max_iter):
        iter_start = time.time()

        # Sample a population and write each candidate's dynamics to its env.
        params_np = cmaes.ask()
        params_tensor = torch.tensor(params_np, device=device, dtype=torch.float32)
        apply_params_to_envs(robot, params_tensor, arm_joint_ids, num_joints, device)

        env.reset()
        settle_robot(robot, sim, default_joint_pos, default_joint_vel, arm_joint_ids, sim_dt, headless=True)

        # Fitness: accumulated per-step sum of squared joint errors (rad^2).
        scores = torch.zeros(N, device=device)
        wp_idx = 0

        for t in range(T_steps):
            # Advance active waypoint; all envs share the same schedule.
            while wp_idx + 1 < W and t >= wp_step_indices[wp_idx + 1]:
                wp_idx += 1

            ee_pos_w = robot.data.body_pos_w[:, ee_frame_idx]
            ee_quat_w = robot.data.body_quat_w[:, ee_frame_idx]
            ee_pos_b, ee_quat_b = subtract_frame_transforms(
                robot.data.root_pos_w, robot.data.root_quat_w, ee_pos_w, ee_quat_w
            )
            target_pos = wp_target_pos[wp_idx].unsqueeze(0).expand(N, -1)
            target_quat = wp_target_quat[wp_idx].unsqueeze(0).expand(N, -1)

            # First 6 dims drive the arm; trailing dims (gripper) stay zero.
            action_arm = target_pose_to_action(ee_pos_b, ee_quat_b, target_pos, target_quat)
            action = torch.cat([action_arm, torch.zeros(N, action_dim - 6, device=device)], dim=-1)
            env.step(action)

            joint_pos = robot.data.joint_pos[:, arm_joint_ids]
            scores += torch.sum((joint_pos - real_joint_pos[t].unsqueeze(0)) ** 2, dim=1)

        # Mean over steps so the score is independent of trajectory length.
        scores = scores / T_steps
        scores_np = scores.cpu().numpy()
        cmaes.tell(scores_np)

        min_score = scores_np.min()
        mean_score = scores_np.mean()
        iter_time = time.time() - iter_start

        # Track the best single candidate ever seen (not the CMA mean).
        if min_score < best_score_ever:
            best_score_ever = min_score
            best_params_ever = params_np[scores_np.argmin()]

        history.append(
            {"iteration": iteration, "min": float(min_score), "mean": float(mean_score), "best": float(best_score_ever)}
        )

        best_delay = round(float(best_params_ever[24]))
        rmse_deg = np.degrees(np.sqrt(best_score_ever))
        print(
            f"[{iteration+1:3d}/{args.max_iter}] "
            f"min={min_score:.6f} mean={mean_score:.6f} best={best_score_ever:.6f} "
            f"({rmse_deg:.3f}\u00b0 delay={best_delay}) {iter_time:.1f}s"
        )

        # Periodic checkpoint so long runs can be resumed/inspected.
        if (iteration + 1) % args.save_interval == 0:
            ckpt = {
                "best_params": best_params_ever,
                "best_score": best_score_ever,
                "iteration": iteration + 1,
                "history": history,
                "bounds": bounds,
                "args": vars(args),
            }
            ckpt_path = os.path.join(output_dir, f"checkpoint_{iteration+1:04d}.pt")
            torch.save(ckpt, ckpt_path)
            print(f" -> {ckpt_path}")

    # Final results
    print(f"\n{'='*60}")
    print(f"DONE RMSE: {np.degrees(np.sqrt(best_score_ever)):.4f}\u00b0")
    print(f"{'='*60}")

    # Decompose the best 25-vector back into named parameter groups.
    arm = best_params_ever[:6]
    sfric = best_params_ever[6:12]
    dratio = best_params_ever[12:18]
    dfric = dratio * sfric
    vfric = best_params_ever[18:24]
    delay = round(float(best_params_ever[24]))

    print(f"\n {'Joint':<25s} {'Arm':>8s} {'SFric':>8s} {'DRat':>8s} {'DFric':>8s} {'VFric':>8s}")
    for i, name in enumerate(ARM_JOINT_NAMES):
        print(f" {name:<25s} {arm[i]:8.4f} {sfric[i]:8.4f} {dratio[i]:8.4f} {dfric[i]:8.4f} {vfric[i]:8.4f}")
    print(f"\n Motor delay: {delay} steps ({delay*sim_dt*1000:.0f}ms at {1/sim_dt:.0f}Hz)")

    final = {
        "best_params": best_params_ever,
        "best_score": best_score_ever,
        "best_armature": arm.tolist(),
        "best_friction": sfric.tolist(),
        "best_dynamic_ratio": dratio.tolist(),
        "best_dynamic_friction": dfric.tolist(),
        "best_viscous_friction": vfric.tolist(),
        "best_delay": delay,
        "history": history,
        "bounds": bounds,
        "args": vars(args),
    }
    final_path = os.path.join(output_dir, "final_results.pt")
    torch.save(final, final_path)
    print(f"\nSaved: {final_path}")


if __name__ == "__main__":
    main()
    simulation_app.close()
diff --git a/scripts_v2/tools/visualize_reset_states.py b/scripts_v2/tools/visualize_reset_states.py
index 599e40e7..db2eb403 100644
--- a/scripts_v2/tools/visualize_reset_states.py
+++ b/scripts_v2/tools/visualize_reset_states.py
@@ -3,7 +3,7 @@
 #
 # SPDX-License-Identifier: BSD-3-Clause
 
-"""Script to visualize saved states from HDF5 dataset."""
+"""Script to visualize saved reset states from a dataset directory."""
 
 from __future__ import annotations
 
@@ -21,8 +21,14 @@
parser.add_argument( "--dataset_dir", type=str, - default="./reset_state_datasets", - help="Directory containing reset-state datasets saved as .pt", + default="./Datasets/OmniReset", + help="Base dataset directory (contains Resets// subdirectories).", +) +parser.add_argument( + "--reset_type", + type=str, + default=None, + help="Single reset type to visualize (e.g. ObjectAnywhereEEAnywhere). If omitted, all four types are loaded.", ) parser.add_argument("--reset_interval", type=float, default=0.1, help="Time interval between resets in seconds.") @@ -37,11 +43,11 @@ import contextlib import gymnasium as gym +import inspect from isaaclab.envs import ManagerBasedRLEnv -from isaaclab.managers import EventTermCfg as EventTerm +from isaaclab.managers import ManagerTermBase -from uwlab_tasks.manager_based.manipulation.reset_states.mdp import events as task_mdp from uwlab_tasks.utils.hydra import hydra_task_compose torch.backends.cuda.matmul.allow_tf32 = True @@ -59,22 +65,29 @@ def main(env_cfg, agent_cfg) -> None: # make sure environment is non-deterministic for diverse pose discovery env_cfg.seed = None - # Set up the MultiResetManager to load states from the computed dataset - reset_from_reset_states = EventTerm( - func=task_mdp.MultiResetManager, - mode="reset", - params={ - "base_paths": [args_cli.dataset_dir], - "probs": [1.0], - "success": "env.reward_manager.get_term_cfg('progress_context').func.success", - }, - ) - - # Add the reset manager to the environment configuration - env_cfg.events.reset_from_reset_states = reset_from_reset_states + # Override existing MultiResetManager params to use the CLI-specified dataset/types + ALL_RESET_TYPES = [ + "ObjectAnywhereEEAnywhere", + "ObjectRestingEEGrasped", + "ObjectAnywhereEEGrasped", + "ObjectPartiallyAssembledEEGrasped", + ] + reset_types = [args_cli.reset_type] if args_cli.reset_type else ALL_RESET_TYPES + env_cfg.events.reset_from_reset_states.params["dataset_dir"] = args_cli.dataset_dir + 
env_cfg.events.reset_from_reset_states.params["reset_types"] = reset_types + env_cfg.events.reset_from_reset_states.params["probs"] = [1.0] * len(reset_types) # create environment env = cast(ManagerBasedRLEnv, gym.make(args_cli.task, cfg=env_cfg)).unwrapped + + # The EventManager is created before sim.play(), so ManagerTermBase classes + # are deferred to a timeline callback that can silently fail. Force-init any + # class-based event terms that the callback missed. + for mode_cfgs in env.event_manager._mode_term_cfgs.values(): + for tc in mode_cfgs: + if inspect.isclass(tc.func) and issubclass(tc.func, ManagerTermBase): + tc.func = tc.func(cfg=tc, env=env) + env.reset() # Initialize variables @@ -85,7 +98,7 @@ def main(env_cfg, agent_cfg) -> None: while True: asset = env.unwrapped.scene["robot"] # specific for robotiq - gripper_joint_positions = asset.data.joint_pos[:, asset.find_joints(["right_inner_finger_joint"])[0][0]] + gripper_joint_positions = asset.data.joint_pos[:, asset.find_joints(["finger_joint"])[0][0]] gripper_closed_fraction = ( torch.abs(gripper_joint_positions) / env_cfg.actions.gripper.close_command_expr["finger_joint"] ) diff --git a/source/uwlab/uwlab/terrains/trimesh/mesh_terrains.py b/source/uwlab/uwlab/terrains/trimesh/mesh_terrains.py index cb79fa89..35233102 100644 --- a/source/uwlab/uwlab/terrains/trimesh/mesh_terrains.py +++ b/source/uwlab/uwlab/terrains/trimesh/mesh_terrains.py @@ -23,7 +23,7 @@ from isaaclab.terrains.trimesh.mesh_terrains_cfg import MeshInvertedPyramidStairsTerrainCfg, MeshPyramidStairsTerrainCfg from isaaclab.terrains.trimesh.utils import make_border, make_plane -from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR +from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR, resolve_cloud_path if TYPE_CHECKING: from . 
import mesh_terrains_cfg @@ -112,10 +112,10 @@ def cached_terrain_gen( height = cfg.height overhang = "overhang_yes" if cfg.include_overhang else "overhang_no" mesh_id = "mesh_0" - root_path = f"{UWLAB_CLOUD_ASSETS_DIR}/dataset/terrains/dataset/generated_terrain/{terrain_type}/shape_8/height_{height}/level_{level}/{overhang}/{mesh_id}" + root_url = f"{UWLAB_CLOUD_ASSETS_DIR}/dataset/terrains/dataset/generated_terrain/{terrain_type}/shape_8/height_{height}/level_{level}/{overhang}/{mesh_id}" - terrain_mesh_path = os.path.join(root_path, "mesh_terrain.obj") - spawnfile_path = os.path.join(root_path, "spawnable_locations.npy") + terrain_mesh_path = resolve_cloud_path(f"{root_url}/mesh_terrain.obj") + spawnfile_path = resolve_cloud_path(f"{root_url}/spawnable_locations.npy") mesh: trimesh.Trimesh = load_mesh(terrain_mesh_path) xy_scale = cfg.size / (mesh.bounds[1] - mesh.bounds[0])[:2] diff --git a/source/uwlab/uwlab/utils/datasets/__init__.py b/source/uwlab/uwlab/utils/datasets/__init__.py new file mode 100644 index 00000000..67b1f7f7 --- /dev/null +++ b/source/uwlab/uwlab/utils/datasets/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from .torch_dataset_file_handler import TorchDatasetFileHandler +from .zarr_dataset_file_handler import ZarrDatasetFileHandler + +__all__ = ["ZarrDatasetFileHandler", "TorchDatasetFileHandler"] diff --git a/source/uwlab/uwlab/utils/datasets/zarr_dataset_file_handler.py b/source/uwlab/uwlab/utils/datasets/zarr_dataset_file_handler.py new file mode 100644 index 00000000..219f4fd5 --- /dev/null +++ b/source/uwlab/uwlab/utils/datasets/zarr_dataset_file_handler.py @@ -0,0 +1,315 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + + +""" +Zarr Dataset File Handler +This module provides a Zarr-based dataset file handler that works with +all manager-based environments in Isaac Lab, compatible with diffusion policy format. +DESIGN OVERVIEW: +================== +The ZarrDatasetFileHandler is designed to automatically extract and record episode data +from Isaac Lab environments to Zarr format using the ReplayBuffer structure from diffusion policy. +It uses a configuration-based approach to determine which observations and actions to record. +The Zarr format expects the dataset to contain: +- data/ (group containing all episode data) + - actions (array with shape [T, action_dim]) + - obs/ (group containing observations) + - observation_key_1 (array with shape [T, ...]) + - observation_key_2 (array with shape [T, ...]) + - ... (each observation stored as separate key) + - rewards (array with shape [T]) + - dones (array with shape [T]) +- meta/ (group containing metadata) + - episode_ends (array with episode end indices) +KEY FEATURES: +============ +1. CONFIGURATION-DRIVEN: + - Uses environment observation and action managers automatically + - Supports both regular observations and state observations +2. AUTOMATIC FEATURE EXTRACTION: + - Analyzes environment's observation and action managers automatically + - Handles nested observation structures with group-based access + - Automatically detects and processes video/image features + - Supports different action term types +3. FLEXIBLE OBSERVATION HANDLING: + - All observations: saved as "obs/{key}" (separate keys) + - Support for observations from different groups (policy, critic, etc.) + - Automatic tensor shape analysis and feature specification +4. UNIVERSAL COMPATIBILITY: + - Works with any manager-based environment + - No hardcoded assumptions about observation or action structure + - Adapts to different environment types automatically +5. 
EFFICIENT STORAGE: + - Uses Zarr compression for efficient storage + - Supports chunking for large datasets + - Compatible with diffusion policy ReplayBuffer +6. IMAGE SUPPORT: + - Automatically detects image observations (RGB format) + - Saves image observations as zarr arrays with optimized chunking + - Uses blosc compression for fast access during training +USAGE PATTERNS: +============== +1. Basic Usage (Zero Configuration): + ```python + # Automatically records all available observations + handler = ZarrDatasetFileHandler() + handler.create("dataset.zarr") + ``` +2. Image Support: + ```python + # Automatically detects and processes image data + # Handles [B, H, W, C] format and stores efficiently as zarr arrays + ``` +USAGE: +===== +The handler automatically detects and records all available observations from the environment. +No configuration is required, but you can optionally provide configuration for customization: + +```python +# Basic usage - automatically records all observations +handler = ZarrDatasetFileHandler() +handler.create("dataset.zarr") +``` + +This handler provides a streamlined way to record Isaac Lab environments to Zarr datasets +with zero configuration required, compatible with diffusion policy format. +""" + +import numpy as np +import shutil +import torch +from collections.abc import Iterable +from pathlib import Path +from typing import Any + +import numcodecs +import zarr +from isaaclab.utils.datasets.dataset_file_handler_base import DatasetFileHandlerBase +from isaaclab.utils.datasets.episode_data import EpisodeData + + +class ZarrDatasetFileHandler(DatasetFileHandlerBase): + """Zarr dataset file handler for storing episode data. + + Automatically records episode data to Zarr format compatible with diffusion policy ReplayBuffer. + Optimized for large-scale datasets with efficient chunking and compression. + Saves image observations as zarr arrays with memory-optimized chunking. 

    Args:
        chunk_size: Chunk size for temporal dimension (default: 1000 for memory efficiency)
        image_chunk_size: Chunk size for image arrays (default: 100 for memory efficiency)
        image_keys: List of observation keys that should be treated as images (default: None, auto-detect)

    Example Usage:
        ```python
        handler = ZarrDatasetFileHandler()
        handler.create("dataset.zarr", env_name="my_env")
        ```
    """

    def __init__(self, chunk_size: int = 5000, image_chunk_size: int = 50, image_keys: list[str] | None = None):
        """Initialize the Zarr dataset file handler.

        Args:
            chunk_size: Chunk size for temporal dimension (non-image data)
            image_chunk_size: Chunk size for image arrays (memory-optimized)
            image_keys: List of observation keys that should be treated as images

        NOTE(review): the class docstring defaults (1000/100) disagree with
        these signature defaults (5000/50) — confirm which is intended.
        """
        # Lazily populated by create(); None until then.
        self._dataset = None
        self._dataset_path = None
        self._env_name = None
        self._episode_count = 0
        self._compressor = None
        self._chunk_size = chunk_size
        self._image_chunk_size = image_chunk_size
        self._image_keys = image_keys

        # Set up compression for all data
        self._compressor = numcodecs.Blosc(cname="zstd", clevel=5, shuffle=numcodecs.Blosc.BITSHUFFLE)

    def create(self, file_path: str, env_name: str | None = None, overwrite: bool = True):
        """Create a new dataset file.

        Args:
            file_path: Path to the dataset file (must end with .zarr)
            env_name: Optional name for the environment (used in metadata)
            overwrite: If True, silently remove an existing dataset at this path.
                If False, raise ValueError when the path already exists.

        Raises:
            ValueError: if the path lacks a .zarr suffix, or it exists and
                ``overwrite`` is False.
            RuntimeError: if the zarr group structure cannot be created.
        """
        if not file_path.endswith(".zarr"):
            raise ValueError("Dataset file path must end with .zarr")

        self._dataset_path = Path(file_path)

        if self._dataset_path.exists():
            if not overwrite:
                raise ValueError(
                    f"Dataset already exists at {self._dataset_path}. Pass overwrite=True or remove it manually."
                )
            # Destructive: a .zarr dataset is a directory tree on disk.
            print(f"Removing existing dataset at {self._dataset_path}")
            shutil.rmtree(self._dataset_path)

        # Initialize environment name
        self._env_name = env_name or "isaac_lab_env"

        # Use default task description
        self._task_description = "Custom task"

        # Create Zarr dataset structure
        try:
            # Create root group
            self._dataset = zarr.group(str(self._dataset_path))

            # Create data group (will be used later)
            self._dataset.create_group("data")

            # Create meta group
            meta_group = self._dataset.create_group("meta")

            # Initialize episode_ends array
            meta_group.zeros("episode_ends", shape=(0,), dtype=np.int64, compressor=None)

            # Add environment name to metadata
            self._dataset.attrs["env_name"] = self._env_name
            self._dataset.attrs["task_description"] = self._task_description

        except Exception as e:
            # NOTE(review): consider `raise ... from e` to keep the traceback
            # chain; current form discards the original context marker.
            raise RuntimeError(f"Failed to create Zarr dataset: {e}")

        self._episode_count = 0

    def open(self, file_path: str, mode: str = "r"):
        """Open an existing dataset file."""
        # Write-only handler: reading back an existing dataset is unsupported.
        raise NotImplementedError("Open not implemented for Zarr handler")

    def get_env_name(self) -> str | None:
        """Get the environment name."""
        return self._env_name

    def get_episode_names(self) -> Iterable[str]:
        """Get the names of the episodes in the file."""
        if self._dataset is None:
            return []
        # Names are synthesized from the running count, not stored on disk.
        return [f"episode_{i:06d}" for i in range(self._episode_count)]

    def get_num_episodes(self) -> int:
        """Get number of episodes in the file."""
        return self._episode_count

    def write_episode(self, episode: EpisodeData, demo_id: int | None = None):
        """Add an episode to the dataset.

        Args:
            episode: The episode data to add.
            demo_id: Custom index for the episode. If None, uses default index.
        """
        # No-op if the dataset hasn't been created or the episode is empty.
        if self._dataset is None or episode.is_empty():
            return

        # Convert Isaac Lab episode data to Zarr format and save
        self._convert_and_save_episode(episode)

        # Only increment episode count if using default indexing
        if demo_id is None:
            self._episode_count += 1

    def _convert_and_save_episode(self, episode: EpisodeData):
        """Convert Isaac Lab episode data to Zarr format and save it.

        Raises:
            ValueError: if the episode lacks "actions" or "obs" entries.
        """
        episode_dict = episode.data

        if "actions" not in episode_dict or "obs" not in episode_dict:
            raise ValueError("Episode must contain actions and observations")

        num_frames = episode_dict["actions"].shape[0]

        # Process all observations together
        obs_dict = episode_dict["obs"]
        processed_obs = self._process_observations_for_episode(obs_dict)

        # Missing rewards default to zeros; missing dones default to a single
        # terminal flag on the last frame.
        episode_data = {
            "actions": episode_dict["actions"].cpu().numpy(),
            "obs": processed_obs,
            "rewards": episode_dict.get("rewards", torch.zeros(num_frames)).cpu().numpy(),
            "dones": episode_dict.get("dones", torch.cat([torch.zeros(num_frames - 1), torch.ones(1)])).cpu().numpy(),
        }

        # Save episode data to Zarr
        self._save_episode_to_zarr(episode_data)

    def _process_observations_for_episode(self, obs_dict: dict[str, Any]) -> dict[str, np.ndarray]:
        """Process observations for an entire episode.

        Converts each observation tensor to numpy; keys that fail conversion
        are logged and dropped (best-effort, not fatal).
        """
        episode_obs = {}
        for obs_key, value in obs_dict.items():
            try:
                episode_obs[obs_key] = value.cpu().numpy()
            except Exception as e:
                print(f"Error processing observation '{obs_key}': {e}")
        return episode_obs

    def _save_episode_to_zarr(self, episode_data: dict[str, Any]):
        """Save episode data to Zarr format.

        Appends the episode after the current end index; ``meta/episode_ends``
        holds cumulative frame counts in diffusion-policy ReplayBuffer style.

        Raises:
            RuntimeError: if create() has not been called yet.
        """
        if self._dataset is None:
            raise RuntimeError("Dataset not initialized")

        data_group = self._dataset["data"]
        meta_group = self._dataset["meta"]
        episode_ends = meta_group["episode_ends"]

        # Get current episode end and calculate new end
        current_end = int(episode_ends[-1]) if len(episode_ends) > 0 else 0
        episode_length = len(episode_data["actions"])
        new_end = current_end +
episode_length + + # Extend arrays and add episode data + for key, value in episode_data.items(): + if key == "obs": + for obs_key, obs_value in value.items(): + self._extend_or_create_array(data_group, f"obs/{obs_key}", obs_value, episode_length) + else: + self._extend_or_create_array(data_group, key, value, episode_length) + + # Update episode ends + episode_ends.resize(len(episode_ends) + 1) + episode_ends[-1] = int(new_end) + + def _extend_or_create_array(self, group, key: str, data: np.ndarray, episode_length: int): + """Extend existing array or create new one with episode data.""" + if key in group: + # Extend existing array + arr = group[key] + arr.resize(arr.shape[0] + episode_length, *arr.shape[1:]) + arr[-episode_length:] = data + else: + # Create new array with optimized chunking + if self._is_image_array(data): + # Use image-optimized chunking and compression + chunks = (self._image_chunk_size,) + data.shape[1:] + else: + # Use standard chunking for non-image data + chunks = (self._chunk_size,) + data.shape[1:] + + group.create_dataset(key, data=data, chunks=chunks, dtype=data.dtype, compressor=self._compressor) + + def _is_image_array(self, data: np.ndarray) -> bool: + """Check if array is an image array (4D with shape [T, H, W, C]).""" + return data.ndim == 4 and data.shape[-1] in [1, 3, 4] + + def load_episode(self, episode_name: str) -> EpisodeData | None: + """Load episode data from the file.""" + raise NotImplementedError("Load episode not implemented for Zarr handler") + + def flush(self): + """Flush any pending data to disk.""" + # Zarr handles flushing automatically + pass + + def close(self): + """Close the dataset file handler.""" + # Clear references + self._dataset = None + + def add_env_args(self, env_args: dict): + pass diff --git a/source/uwlab_assets/setup.py b/source/uwlab_assets/setup.py index 6a351015..ff0b971b 100644 --- a/source/uwlab_assets/setup.py +++ b/source/uwlab_assets/setup.py @@ -14,6 +14,12 @@ EXTENSION_PATH = 
os.path.dirname(os.path.realpath(__file__)) # Read the extension.toml file EXTENSION_TOML_DATA = toml.load(os.path.join(EXTENSION_PATH, "config", "extension.toml")) + +# Minimum dependencies required prior to installation +INSTALL_REQUIRES = [ + "usd-core", +] + # Installation operation setup( name="uwlab_assets", @@ -25,6 +31,7 @@ keywords=EXTENSION_TOML_DATA["package"]["keywords"], license="BSD-3-Clause", include_package_data=True, + install_requires=INSTALL_REQUIRES, python_requires=">=3.10", packages=["uwlab_assets"], classifiers=[ diff --git a/source/uwlab_assets/uwlab_assets/__init__.py b/source/uwlab_assets/uwlab_assets/__init__.py index b5453bbc..0bf6a5f1 100644 --- a/source/uwlab_assets/uwlab_assets/__init__.py +++ b/source/uwlab_assets/uwlab_assets/__init__.py @@ -5,8 +5,13 @@ """Package containing asset and sensor configurations.""" +import logging import os import toml +import urllib.request +from urllib.parse import urlparse + +logger = logging.getLogger(__name__) # Conveniences to other module directories via relative paths UWLAB_ASSETS_EXT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../")) @@ -15,6 +20,70 @@ """Path to the extension data directory.""" UWLAB_ASSETS_METADATA = toml.load(os.path.join(UWLAB_ASSETS_EXT_DIR, "config", "extension.toml")) """Extension metadata dictionary parsed from the extension.toml file.""" -UWLAB_CLOUD_ASSETS_DIR = "https://uwlab-assets.s3.us-west-004.backblazeb2.com" + +UWLAB_CLOUD_ASSETS_DIR = "https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main" + + +def _extract_relative_path(url: str) -> str: + """Strip the HuggingFace resolve-URL prefix, returning the repo-relative path. 
+ + Example: + ``https://huggingface.co/datasets/UW-Lab/uwlab-assets/resolve/main/Props/Custom/Peg/peg.usd`` + -> ``Props/Custom/Peg/peg.usd`` + """ + parsed = urlparse(url) + parts = parsed.path.strip("/").split("/") + try: + idx = parts.index("resolve") + return "/".join(parts[idx + 2 :]) + except ValueError: + return parsed.path.strip("/") + + +def _urlretrieve_quiet(url: str, dest: str) -> None: + """Download *url* to *dest* silently.""" + req = urllib.request.urlopen(url) + chunk_size = 1 << 16 # 64 KiB + with open(dest, "wb") as f: + while True: + chunk = req.read(chunk_size) + if not chunk: + break + f.write(chunk) + req.close() + + +def resolve_cloud_path(path: str) -> str: + """Resolve a cloud asset path to a local file, downloading if needed. + + * Local paths (including already-cached files) are returned immediately. + * HTTPS URLs are downloaded once to ``~/.cache/uwlab/assets/`` + and the local cached path is returned on subsequent calls. + * Downloads are atomic (write to a temp file, then ``os.rename``). 
+ """ + if not path.startswith(("http://", "https://")): + return path + + rel = _extract_relative_path(path) + cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "uwlab", "assets") + local = os.path.join(cache_dir, rel) + + if os.path.isfile(local): + return local + + os.makedirs(os.path.dirname(local), exist_ok=True) + tmp = f"{local}.tmp.{os.getpid()}" + try: + logger.info(f"Downloading {rel} ...") + _urlretrieve_quiet(path, tmp) + os.rename(tmp, local) + except Exception: + if os.path.exists(tmp): + os.remove(tmp) + raise + + return local + + # Configure the module-level variables __version__ = UWLAB_ASSETS_METADATA["package"]["version"] diff --git a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/__init__.py b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/__init__.py index cc78c0c4..4efe8e4d 100644 --- a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/__init__.py +++ b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/__init__.py @@ -3,4 +3,5 @@ # # SPDX-License-Identifier: BSD-3-Clause from .actions import * -from .ur5e_robotiq_2f85_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85, IMPLICIT_UR5E_ROBOTIQ_2F85, ROBOTIQ_2F85 +from .kinematics import * +from .ur5e_robotiq_2f85_gripper import * diff --git a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/actions.py b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/actions.py index 69a411ea..b3b669e5 100644 --- a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/actions.py +++ b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/actions.py @@ -13,10 +13,7 @@ from isaaclab.utils import configclass from uwlab.controllers.differential_ik_cfg import MultiConstraintDifferentialIKControllerCfg -from uwlab.envs.mdp.actions.actions_cfg import ( - DefaultJointPositionStaticActionCfg, - MultiConstraintsDifferentialInverseKinematicsActionCfg, -) +from uwlab.envs.mdp.actions.actions_cfg import 
MultiConstraintsDifferentialInverseKinematicsActionCfg """ UR5E ROBOTIQ 2F85 ACTIONS @@ -67,10 +64,6 @@ close_command_expr={"finger_joint": 0.785398}, ) -ROBOTIQ_COMPLIANT_JOINTS = DefaultJointPositionStaticActionCfg( - asset_name="robot", joint_names=["left_inner_finger_joint", "right_inner_finger_joint"] -) - ROBOTIQ_MC_IK_ABSOLUTE = MultiConstraintsDifferentialInverseKinematicsActionCfg( asset_name="robot", joint_names=["joint.*"], @@ -86,31 +79,26 @@ class Ur5eRobotiq2f85IkAbsoluteAction: arm = UR5E_MC_IKABSOLUTE_ARM gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS @configclass class Ur5eRobotiq2f85McIkDeltaAction: arm = UR5E_MC_IKDELTA_ARM gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS @configclass class Ur5eRobotiq2f85JointPositionAction: arm = UR5E_JOINT_POSITION gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS @configclass class Ur5eRobotiq2f85RelativeJointPositionAction: arm = UR5E_RELATIVE_JOINT_POSITION gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS @configclass class Robotiq2f85BinaryGripperAction: gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS diff --git a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/kinematics.py b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/kinematics.py new file mode 100644 index 00000000..cd4d3991 --- /dev/null +++ b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/kinematics.py @@ -0,0 +1,288 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Copyright (c) 2024-2025, The UW Lab Project Developers. +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Calibrated kinematics for UR5e. 
+ +Contains: +- Analytical FK, Jacobian, and Mass Matrix computation (batched PyTorch) + +Calibrated joint parameters and link inertials are loaded lazily from +``metadata.yaml`` co-located with the robot USD (via :func:`_load_calibration`). + +All functions operate on the 6 arm joints only and output in the REP-103 +base_link frame (180 deg Z rotation from base_link_inertia). +""" + +import functools +import os +import tempfile +import torch +import yaml + +from isaaclab.utils.assets import retrieve_file_path + +# ============================================================================ +# Constants +# ============================================================================ + +ARM_JOINT_NAMES = [ + "shoulder_pan_joint", + "shoulder_lift_joint", + "elbow_joint", + "wrist_1_joint", + "wrist_2_joint", + "wrist_3_joint", +] +EE_BODY_NAME = "wrist_3_link" +NUM_ARM_JOINTS = 6 + +# 180 deg rotation around Z-axis (base_link_inertia -> base_link conversion) +R_180Z = torch.tensor([[-1, 0, 0], [0, -1, 0], [0, 0, 1]], dtype=torch.float32) + + +# ============================================================================ +# Lazy-loaded calibration data (from metadata.yaml next to the robot USD) +# ============================================================================ + + +@functools.lru_cache(maxsize=1) +def _load_calibration() -> dict[str, torch.Tensor]: + """Download (once) and parse calibrated kinematics from the robot metadata.""" + from .ur5e_robotiq_2f85_gripper import UR5E_ARTICULATION + + usd_dir = os.path.dirname(UR5E_ARTICULATION.spawn.usd_path) + meta_path = os.path.join(usd_dir, "metadata.yaml") + local = retrieve_file_path(meta_path, download_dir=tempfile.gettempdir()) + with open(local) as f: + metadata = yaml.safe_load(f) + if metadata is None: + raise RuntimeError(f"metadata.yaml is empty or failed to load: {local} (source: {meta_path})") + joints = metadata["calibrated_joints"] + inertials = metadata["link_inertials"] + return { + "joints_xyz": 
torch.tensor(joints["xyz"], dtype=torch.float32), + "joints_rpy": torch.tensor(joints["rpy"], dtype=torch.float32), + "link_masses": torch.tensor(inertials["masses"], dtype=torch.float32), + "link_coms": torch.tensor(inertials["coms"], dtype=torch.float32), + "link_inertias": torch.tensor(inertials["inertias"], dtype=torch.float32), + } + + +# ============================================================================ +# Kinematics helpers +# ============================================================================ + + +def rpy_to_matrix_torch(rpy: torch.Tensor) -> torch.Tensor: + """Convert roll-pitch-yaw to rotation matrix (single or batched).""" + if rpy.dim() == 1: + roll, pitch, yaw = rpy[0], rpy[1], rpy[2] + cr, sr = torch.cos(roll), torch.sin(roll) + cp, sp = torch.cos(pitch), torch.sin(pitch) + cy, sy = torch.cos(yaw), torch.sin(yaw) + R = torch.stack([ + torch.stack([cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr]), + torch.stack([sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr]), + torch.stack([-sp, cp * sr, cp * cr]), + ]) + return R + else: + roll, pitch, yaw = rpy[:, 0], rpy[:, 1], rpy[:, 2] + cr, sr = torch.cos(roll), torch.sin(roll) + cp, sp = torch.cos(pitch), torch.sin(pitch) + cy, sy = torch.cos(yaw), torch.sin(yaw) + R = torch.zeros(rpy.shape[0], 3, 3, device=rpy.device, dtype=rpy.dtype) + R[:, 0, 0] = cy * cp + R[:, 0, 1] = cy * sp * sr - sy * cr + R[:, 0, 2] = cy * sp * cr + sy * sr + R[:, 1, 0] = sy * cp + R[:, 1, 1] = sy * sp * sr + cy * cr + R[:, 1, 2] = sy * sp * cr - cy * sr + R[:, 2, 0] = -sp + R[:, 2, 1] = cp * sr + R[:, 2, 2] = cp * cr + return R + + +# ============================================================================ +# Analytical Jacobian +# ============================================================================ + + +def compute_jacobian_analytical(joint_angles: torch.Tensor, device: str = "cuda") -> torch.Tensor: + """Compute geometric Jacobian using calibrated kinematics (batched). 
+ + Computes to wrist_3_link frame origin (NOT COM), matching real robot code. + + Args: + joint_angles: (N, 6) joint angles in radians. + Returns: + J: (N, 6, 6) Jacobian [linear; angular]. + """ + N = joint_angles.shape[0] + cal = _load_calibration() + xyz_all = cal["joints_xyz"].to(device) + rpy_all = cal["joints_rpy"].to(device) + R_180Z_dev = R_180Z.to(device) + + # FK to get EE position + T = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + for i in range(6): + R_fixed = rpy_to_matrix_torch(rpy_all[i]) + T_fixed = torch.eye(4, device=device, dtype=torch.float32) + T_fixed[:3, :3] = R_fixed + T_fixed[:3, 3] = xyz_all[i] + T_fixed = T_fixed.unsqueeze(0).repeat(N, 1, 1) + theta = joint_angles[:, i] + ct, st = torch.cos(theta), torch.sin(theta) + T_joint = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + T_joint[:, 0, 0] = ct + T_joint[:, 0, 1] = -st + T_joint[:, 1, 0] = st + T_joint[:, 1, 1] = ct + T = torch.bmm(torch.bmm(T, T_fixed), T_joint) + p_ee = T[:, :3, 3] + + # Jacobian columns + J = torch.zeros(N, 6, 6, device=device, dtype=torch.float32) + T = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + for i in range(6): + R_fixed = rpy_to_matrix_torch(rpy_all[i]) + T_fixed = torch.eye(4, device=device, dtype=torch.float32) + T_fixed[:3, :3] = R_fixed + T_fixed[:3, 3] = xyz_all[i] + T_fixed = T_fixed.unsqueeze(0).repeat(N, 1, 1) + T_joint_frame = torch.bmm(T, T_fixed) + z_i = T_joint_frame[:, :3, 2] + p_i = T_joint_frame[:, :3, 3] + J[:, :3, i] = torch.cross(z_i, p_ee - p_i, dim=1) + J[:, 3:, i] = z_i + theta = joint_angles[:, i] + ct, st = torch.cos(theta), torch.sin(theta) + T_joint_rot = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + T_joint_rot[:, 0, 0] = ct + T_joint_rot[:, 0, 1] = -st + T_joint_rot[:, 1, 0] = st + T_joint_rot[:, 1, 1] = ct + T = torch.bmm(T_joint_frame, T_joint_rot) + + # Rotate from base_link_inertia to base_link 
(REP-103) + R_180Z_batch = R_180Z_dev.unsqueeze(0).repeat(N, 1, 1) + J[:, :3, :] = torch.bmm(R_180Z_batch, J[:, :3, :]) + J[:, 3:, :] = torch.bmm(R_180Z_batch, J[:, 3:, :]) + return J + + +# ============================================================================ +# Analytical Mass Matrix (CRBA) +# ============================================================================ + + +def compute_mass_matrix_analytical(joint_angles: torch.Tensor, device: str = "cuda") -> torch.Tensor: + """Compute 6x6 joint-space mass matrix using CRBA. + + Uses the same inertia parameters as real robot for consistency. + + Args: + joint_angles: (N, 6) joint angles in radians. + Returns: + M: (N, 6, 6) mass matrix. + """ + N = joint_angles.shape[0] + cal = _load_calibration() + xyz_all = cal["joints_xyz"].to(device) + rpy_all = cal["joints_rpy"].to(device) + masses = cal["link_masses"].to(device) + coms = cal["link_coms"].to(device) + inertias = cal["link_inertias"].to(device) + + M = torch.zeros(N, 6, 6, device=device, dtype=torch.float32) + + R_fixed_all = [] + T_fixed_all = [] + for i in range(6): + R_fixed = rpy_to_matrix_torch(rpy_all[i]) + T_fixed = torch.eye(4, device=device, dtype=torch.float32) + T_fixed[:3, :3] = R_fixed + T_fixed[:3, 3] = xyz_all[i] + R_fixed_all.append(R_fixed) + T_fixed_all.append(T_fixed.unsqueeze(0).expand(N, -1, -1).clone()) + + transforms = [] + T = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + transforms.append(T.clone()) + for i in range(6): + theta = joint_angles[:, i] + ct, st = torch.cos(theta), torch.sin(theta) + T_joint = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + T_joint[:, 0, 0] = ct + T_joint[:, 0, 1] = -st + T_joint[:, 1, 0] = st + T_joint[:, 1, 1] = ct + T = torch.bmm(torch.bmm(T, T_fixed_all[i]), T_joint) + transforms.append(T.clone()) + + def make_joint_rot(theta_batch): + ct, st = torch.cos(theta_batch), torch.sin(theta_batch) + T_rot = torch.eye(4, device=device, 
dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + T_rot[:, 0, 0] = ct + T_rot[:, 0, 1] = -st + T_rot[:, 1, 0] = st + T_rot[:, 1, 1] = ct + return T_rot + + for link_idx in range(6): + m = masses[link_idx] + com_local = coms[link_idx] + I_local = inertias[link_idx] + I_tensor = torch.zeros(3, 3, device=device, dtype=torch.float32) + I_tensor[0, 0] = I_local[0] + I_tensor[1, 1] = I_local[1] + I_tensor[2, 2] = I_local[2] + I_tensor[0, 1] = I_tensor[1, 0] = I_local[3] + I_tensor[0, 2] = I_tensor[2, 0] = I_local[4] + I_tensor[1, 2] = I_tensor[2, 1] = I_local[5] + + T_link = transforms[link_idx + 1] + R_link = T_link[:, :3, :3] + p_link = T_link[:, :3, 3] + p_com = p_link + torch.bmm(R_link, com_local.view(1, 3, 1).expand(N, -1, -1)).squeeze(-1) + I_tensor_batch = I_tensor.unsqueeze(0).expand(N, -1, -1) + I_world = torch.bmm(torch.bmm(R_link, I_tensor_batch), R_link.transpose(-1, -2)) + + T_j = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + for j in range(link_idx + 1): + T_joint_frame_j = torch.bmm(T_j, T_fixed_all[j]) + z_j = T_joint_frame_j[:, :3, 2] + p_j = T_joint_frame_j[:, :3, 3] + J_v_j = torch.cross(z_j, p_com - p_j, dim=1) + J_w_j = z_j + + for k in range(j + 1): + T_k = torch.eye(4, device=device, dtype=torch.float32).unsqueeze(0).repeat(N, 1, 1) + for kk in range(k + 1): + T_joint_frame_kk = torch.bmm(T_k, T_fixed_all[kk]) + if kk < k: + T_k = torch.bmm(T_joint_frame_kk, make_joint_rot(joint_angles[:, kk])) + else: + T_k = T_joint_frame_kk + z_k = T_k[:, :3, 2] + p_k = T_k[:, :3, 3] + J_v_k = torch.cross(z_k, p_com - p_k, dim=1) + J_w_k = z_k + term1 = m * torch.sum(J_v_j * J_v_k, dim=1) + term2 = torch.sum(J_w_j * torch.bmm(I_world, J_w_k.unsqueeze(-1)).squeeze(-1), dim=1) + M[:, j, k] += term1 + term2 + if j != k: + M[:, k, j] += term1 + term2 + T_j = torch.bmm(T_joint_frame_j, make_joint_rot(joint_angles[:, j])) + + M += torch.eye(6, device=device, dtype=torch.float32).unsqueeze(0) * 1e-6 + return M diff --git 
a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/ur5e_robotiq_2f85_gripper.py b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/ur5e_robotiq_2f85_gripper.py index e1ea0c7f..5d52f343 100644 --- a/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/ur5e_robotiq_2f85_gripper.py +++ b/source/uwlab_assets/uwlab_assets/robots/ur5e_robotiq_gripper/ur5e_robotiq_2f85_gripper.py @@ -3,27 +3,29 @@ # # SPDX-License-Identifier: BSD-3-Clause -"""Configuration for the UR5 robots. +"""Configuration for the UR5e + Robotiq 2F-85 robot. The following configurations are available: -* :obj:`UR5E_CFG`: Ur5e robot +* :obj:`UR5E_ARTICULATION`: Base articulation (USD, init state). +* :obj:`EXPLICIT_UR5E_ROBOTIQ_2F85`: Full robot with DelayedPDActuator arm (PD delay, for sim2real finetuning). +* :obj:`IMPLICIT_UR5E_ROBOTIQ_2F85`: Full robot with ImplicitActuator arm (no motor delay, for RL training). +* :obj:`UR5E_ROBOTIQ_2F85`: Alias for ``EXPLICIT_UR5E_ROBOTIQ_2F85`` (backward compatibility). 
""" import isaaclab.sim as sim_utils -from isaaclab.actuators import ImplicitActuatorCfg +from isaaclab.actuators import DelayedPDActuatorCfg, ImplicitActuatorCfg from isaaclab.assets.articulation import ArticulationCfg from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR ROBOTIQ_2F85_DEFAULT_JOINT_POS = { "finger_joint": 0.0, - "right_outer.*": 0.0, - "left_outer.*": 0.0, + "right_outer_knuckle_joint": 0.0, + "left_inner_knuckle_joint": 0.0, + "right_inner_knuckle_joint": 0.0, "left_inner_finger_knuckle_joint": 0.0, "right_inner_finger_knuckle_joint": 0.0, - "left_inner_finger_joint": -0.785398, - "right_inner_finger_joint": 0.785398, } UR5E_DEFAULT_JOINT_POS = { @@ -36,9 +38,27 @@ **ROBOTIQ_2F85_DEFAULT_JOINT_POS, } +UR5E_VELOCITY_LIMITS = { + "shoulder_pan_joint": 1.5708, + "shoulder_lift_joint": 1.5708, + "elbow_joint": 1.5708, + "wrist_1_joint": 3.1415, + "wrist_2_joint": 3.1415, + "wrist_3_joint": 3.1415, +} + +UR5E_EFFORT_LIMITS = { + "shoulder_pan_joint": 150.0, + "shoulder_lift_joint": 150.0, + "elbow_joint": 150.0, + "wrist_1_joint": 28.0, + "wrist_2_joint": 28.0, + "wrist_3_joint": 28.0, +} + UR5E_ARTICULATION = ArticulationCfg( spawn=sim_utils.UsdFileCfg( - usd_path=f"{UWLAB_CLOUD_ASSETS_DIR}/Robots/UniversalRobots/Ur5e2f85RobotiqGripper/ur5e_robotiq_gripper_d415_mount_safety.usd", + usd_path=f"{UWLAB_CLOUD_ASSETS_DIR}/Robots/UniversalRobots/Ur5e2f85RobotiqGripperCalibrated/ur5e_robotiq_gripper_d415_mount_safety_calibrated.usd", activate_contact_sensors=False, rigid_props=sim_utils.RigidBodyPropertiesCfg( disable_gravity=True, @@ -55,7 +75,7 @@ ROBOTIQ_2F85 = ArticulationCfg( prim_path="{ENV_REGEX_NS}/RobotiqGripper", spawn=sim_utils.UsdFileCfg( - usd_path=f"{UWLAB_CLOUD_ASSETS_DIR}/Robots/UniversalRobots/2f85RobotiqGripper/robotiq_2f85_gripper.usd", + usd_path=f"{UWLAB_CLOUD_ASSETS_DIR}/Robots/UniversalRobots/2f85RobotiqGripperCalibrated/robotiq_2f85_gripper_calibrated.usd", activate_contact_sensors=False, rigid_props=sim_utils.RigidBodyPropertiesCfg( 
disable_gravity=True, @@ -74,70 +94,36 @@ joint_names_expr=["finger_joint"], stiffness=17, damping=5, - effort_limit_sim=165, - ), - "inner_finger": ImplicitActuatorCfg( - joint_names_expr=[".*_inner_finger_joint"], - stiffness=0.2, - damping=0.02, - effort_limit_sim=0.5, + effort_limit_sim=60, ), }, soft_joint_pos_limit_factor=1, ) -IMPLICIT_UR5E_ROBOTIQ_2F85 = UR5E_ARTICULATION.copy() # type: ignore -IMPLICIT_UR5E_ROBOTIQ_2F85.actuators = { - "arm": ImplicitActuatorCfg( +EXPLICIT_UR5E_ROBOTIQ_2F85 = UR5E_ARTICULATION.copy() # type: ignore +EXPLICIT_UR5E_ROBOTIQ_2F85.actuators = { + "arm": DelayedPDActuatorCfg( joint_names_expr=["shoulder.*", "elbow.*", "wrist.*"], - stiffness={ - "shoulder_pan_joint": 4.63, - "shoulder_lift_joint": 5.41, - "elbow_joint": 8.06, - "wrist_1_joint": 7.28, - "wrist_2_joint": 8.04, - "wrist_3_joint": 7.18, - }, - damping={ - "shoulder_pan_joint": 8.84, - "shoulder_lift_joint": 6.47, - "elbow_joint": 9.46, - "wrist_1_joint": 2.80, - "wrist_2_joint": 2.41, - "wrist_3_joint": 1.90, - }, - velocity_limit_sim=3.14, - effort_limit_sim={ - "shoulder_pan_joint": 150.0, - "shoulder_lift_joint": 150.0, - "elbow_joint": 150.0, - "wrist_1_joint": 28.0, - "wrist_2_joint": 28.0, - "wrist_3_joint": 28.0, - }, - armature=0.01, + stiffness=0.0, + damping=0.0, + effort_limit=UR5E_EFFORT_LIMITS, + effort_limit_sim=UR5E_EFFORT_LIMITS, + velocity_limit=UR5E_VELOCITY_LIMITS, + velocity_limit_sim=UR5E_VELOCITY_LIMITS, + min_delay=0, + max_delay=1, ), "gripper": ROBOTIQ_2F85.actuators["gripper"], - "inner_finger": ROBOTIQ_2F85.actuators["inner_finger"], } -EXPLICIT_UR5E_ROBOTIQ_2F85 = UR5E_ARTICULATION.copy() # type: ignore -EXPLICIT_UR5E_ROBOTIQ_2F85.actuators = { +IMPLICIT_UR5E_ROBOTIQ_2F85 = UR5E_ARTICULATION.copy() # type: ignore +IMPLICIT_UR5E_ROBOTIQ_2F85.actuators = { "arm": ImplicitActuatorCfg( joint_names_expr=["shoulder.*", "elbow.*", "wrist.*"], stiffness=0.0, damping=0.0, - velocity_limit_sim=3.14, - effort_limit_sim={ - "shoulder_pan_joint": 
150.0, - "shoulder_lift_joint": 150.0, - "elbow_joint": 150.0, - "wrist_1_joint": 28.0, - "wrist_2_joint": 28.0, - "wrist_3_joint": 28.0, - }, - armature=0.01, + effort_limit_sim=UR5E_EFFORT_LIMITS, + velocity_limit_sim=UR5E_VELOCITY_LIMITS, ), "gripper": ROBOTIQ_2F85.actuators["gripper"], - "inner_finger": ROBOTIQ_2F85.actuators["inner_finger"], } diff --git a/source/uwlab_assets/uwlab_assets/robots/xarm_leap/actions.py b/source/uwlab_assets/uwlab_assets/robots/xarm_leap/actions.py index a7acd64a..71cd5282 100644 --- a/source/uwlab_assets/uwlab_assets/robots/xarm_leap/actions.py +++ b/source/uwlab_assets/uwlab_assets/robots/xarm_leap/actions.py @@ -8,7 +8,7 @@ from isaaclab.envs.mdp.actions.actions_cfg import JointPositionActionCfg from isaaclab.utils import configclass -from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR +from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR, resolve_cloud_path from uwlab.controllers.differential_ik_cfg import MultiConstraintDifferentialIKControllerCfg from uwlab.envs.mdp.actions.actions_cfg import ( @@ -74,7 +74,7 @@ asset_name="robot", joint_names=["joint.*", "j[0-9]+"], scale=1.0, - eigenspace_path=f"{UWLAB_CLOUD_ASSETS_DIR}/dataset/misc/hammer_grasping_pca_components.npy", + eigenspace_path=resolve_cloud_path(f"{UWLAB_CLOUD_ASSETS_DIR}/dataset/misc/hammer_grasping_pca_components.npy"), joint_range=(-3.14, 3.14), ) diff --git a/source/uwlab_rl/uwlab_rl/rsl_rl/exporter.py b/source/uwlab_rl/uwlab_rl/rsl_rl/exporter.py new file mode 100644 index 00000000..b0df39dd --- /dev/null +++ b/source/uwlab_rl/uwlab_rl/rsl_rl/exporter.py @@ -0,0 +1,185 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Copyright (c) 2022-2025, The Isaac Lab Project Developers. +# All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import os +import torch +from torch import nn + +from isaaclab_rl.rsl_rl.exporter import _OnnxPolicyExporter, _TorchPolicyExporter + + +def export_policy_as_jit(policy: object, normalizer: object | None, path: str, filename="policy.pt"): + """Export policy into a Torch JIT file. + + Args: + policy: The policy torch module. + normalizer: The empirical normalizer module. If None, Identity is used. + path: The path to the saving directory. + filename: The name of exported JIT file. Defaults to "policy.pt". + """ + policy_exporter = _TorchPolicyExporterExtended(policy, normalizer) + policy_exporter.export(path, filename) + + +def export_policy_as_onnx( + policy: object, path: str, normalizer: object | None = None, filename="policy.onnx", verbose=False +): + """Export policy into a Torch ONNX file. + + Args: + policy: The policy torch module. + normalizer: The empirical normalizer module. If None, Identity is used. + path: The path to the saving directory. + filename: The name of exported ONNX file. Defaults to "policy.onnx". + verbose: Whether to print the model summary. Defaults to False. + """ + if not os.path.exists(path): + os.makedirs(path, exist_ok=True) + policy_exporter = _OnnxPolicyExporterExtended(policy, normalizer, verbose) + policy_exporter.export(path, filename) + + +""" +Helper Classes - Private. 
+""" + + +class _StateDependentPolicyMixin(nn.Module): + """Mixin class to handle state-dependent policy logic.""" + + def _setup_state_dependent_policy(self, policy): + """Setup state-dependent policy components.""" + self.actor_features = self.actor[:-1] # type: ignore + self.actor_final = self.actor[-1] # type: ignore + + self.register_buffer("log_std", policy.log_std.clone()) + self.epsilon = 1e-6 + + def _setup_regular_policy(self, policy): + """Setup regular policy components.""" + self.actor_features = self.actor[:-1] # type: ignore + self.actor_final = self.actor[-1] # type: ignore + + if hasattr(policy, "std"): + self.register_buffer("std", policy.std.clone()) + if hasattr(policy, "log_std"): + self.register_buffer("log_std", policy.log_std.clone()) + if hasattr(policy, "noise_std_type"): + self.noise_std_type = policy.noise_std_type + else: + self.noise_std_type = "scalar" + + # For GSDE, ensure epsilon is set + if self.noise_std_type == "gsde": + self.epsilon = 1e-6 + + def _ensure_compatibility_attributes(self, policy): + """Ensure all attributes exist for TorchScript compatibility.""" + if not hasattr(self, "std"): + if hasattr(policy, "std"): + self.register_buffer("std", policy.std.clone()) + else: + # Create a default std tensor + default_std = torch.ones(policy.num_actions if hasattr(policy, "num_actions") else 1) + self.register_buffer("std", default_std) + + if not hasattr(self, "log_std"): + if hasattr(policy, "log_std"): + self.register_buffer("log_std", policy.log_std.clone()) + else: + # Create a default log_std tensor + default_log_std = torch.zeros(policy.num_actions if hasattr(policy, "num_actions") else 1) + self.register_buffer("log_std", default_log_std) + + if not hasattr(self, "epsilon"): + self.epsilon = 1e-6 + + if not hasattr(self, "noise_std_type"): + if hasattr(policy, "noise_std_type"): + self.noise_std_type = policy.noise_std_type + else: + self.noise_std_type = "scalar" # Default fallback + + # Ensure epsilon is set for GSDE + 
if self.noise_std_type == "gsde" and not hasattr(self, "epsilon"): + self.epsilon = 1e-6 + + def _compute_distribution(self, observations): + """Compute mean and std for distribution.""" + if self.is_state_dependent.item(): # type: ignore + # Use the separated layers + features = self.actor_features(observations) # type: ignore + mean = self.actor_final(features) # type: ignore + + # Compute variance using exploration matrices and torch.mm + variance = torch.mm(features**2, torch.exp(self.log_std) ** 2) # type: ignore + std = torch.sqrt(variance + self.epsilon) + + return mean, std + else: + # Regular ActorCritic logic + mean = self.actor(observations) # type: ignore + + if self.noise_std_type == "scalar": + std = self.std.expand_as(mean) # type: ignore + elif self.noise_std_type == "log": + std = torch.exp(self.log_std).expand_as(mean) # type: ignore + elif self.noise_std_type == "gsde": + # GSDE: log_std is a matrix (hidden_dim, num_actions) + # Compute features from actor[:-1] (all layers except last) + features = self.actor_features(observations) # type: ignore + # Compute variance: variance = torch.mm(features**2, exp(log_std)**2) + # features shape: (batch, hidden_dim), log_std shape: (hidden_dim, num_actions) + variance = torch.mm(features**2, torch.exp(self.log_std) ** 2) # type: ignore + std = torch.sqrt(variance + self.epsilon) + else: + std = torch.ones_like(mean) + + return mean, std + + +class _TorchPolicyExporterExtended(_TorchPolicyExporter, _StateDependentPolicyMixin): + def __init__(self, policy, normalizer=None): + super().__init__(policy, normalizer) + + # Detect policy type + is_state_dependent = hasattr(policy, "use_state_dependent_noise") and policy.use_state_dependent_noise + self.register_buffer("is_state_dependent", torch.tensor(is_state_dependent, dtype=torch.bool)) + + if is_state_dependent: + self._setup_state_dependent_policy(policy) + else: + self._setup_regular_policy(policy) + + # Ensure all attributes exist for TorchScript 
compatibility + self._ensure_compatibility_attributes(policy) + + @torch.jit.export + def compute_distribution(self, x): + observations = self.normalizer(x) + return self._compute_distribution(observations) + + +class _OnnxPolicyExporterExtended(_OnnxPolicyExporter, _StateDependentPolicyMixin): + def __init__(self, policy, normalizer=None, verbose=False): + super().__init__(policy, normalizer, verbose) + + is_state_dependent = hasattr(policy, "use_state_dependent_noise") and policy.use_state_dependent_noise + self.register_buffer("is_state_dependent", torch.tensor(is_state_dependent, dtype=torch.bool)) + + if is_state_dependent: + self._setup_state_dependent_policy(policy) + else: + self._setup_regular_policy(policy) + + @torch.jit.export + def compute_distribution(self, x): + observations = self.normalizer(x) + return self._compute_distribution(observations) diff --git a/source/uwlab_rl/uwlab_rl/rsl_rl/rl_cfg.py b/source/uwlab_rl/uwlab_rl/rsl_rl/rl_cfg.py index c692bb28..11917539 100644 --- a/source/uwlab_rl/uwlab_rl/rsl_rl/rl_cfg.py +++ b/source/uwlab_rl/uwlab_rl/rsl_rl/rl_cfg.py @@ -27,6 +27,9 @@ class BehaviorCloningCfg: experts_observation_func: callable = None """The function that returns expert observation data, default is None, same as student observation.""" + experts_action_group_cfg: str | None = None + """The action group of the expert which may be different from student""" + learn_std: bool = False """Whether to learn the standard deviation of the expert policy.""" diff --git a/source/uwlab_rl/uwlab_rl/wrappers/__init__.py b/source/uwlab_rl/uwlab_rl/wrappers/__init__.py new file mode 100644 index 00000000..20878b9f --- /dev/null +++ b/source/uwlab_rl/uwlab_rl/wrappers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md).
# All Rights Reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Wrappers adapting diffusion policies to Isaac Lab style batched environments."""

import torch
from abc import ABC, abstractmethod
from typing import Any


class ObservationHistoryManager(ABC):
    """Maintain a rolling window of the last ``n_obs_steps`` observations per env."""

    def __init__(self, num_envs: int, n_obs_steps: int, device: torch.device):
        self.num_envs = num_envs
        self.n_obs_steps = n_obs_steps
        self.device = device
        self.history = None
        # Env indices whose history must be re-seeded with their next observation.
        self.needs_init = set()

    @abstractmethod
    def initialize(self, processed_obs: dict[str, torch.Tensor]):
        """Allocate history storage from the first observation."""

    @abstractmethod
    def update(self, processed_obs: dict[str, torch.Tensor]):
        """Push a new observation into the rolling history."""

    @abstractmethod
    def get_batch(self, env_indices: list[int]) -> dict[str, torch.Tensor]:
        """Return the stacked observation window for the given environments."""

    @abstractmethod
    def reset_envs(self, env_indices: list[int]):
        """Mark the given environments for history re-initialization."""


class LowDimObservationHistory(ObservationHistoryManager):
    """Observation history for flat (low-dimensional) observation vectors."""

    def initialize(self, processed_obs: dict[str, torch.Tensor]):
        """Allocate a zero-filled ``(num_envs, n_obs_steps, obs_dim)`` buffer."""
        obs_dim = processed_obs["obs"].shape[-1]
        self.history = torch.zeros(
            (self.num_envs, self.n_obs_steps, obs_dim),
            device=self.device,
            dtype=processed_obs["obs"].dtype,
        )

    def update(self, processed_obs: dict[str, torch.Tensor]):
        """Shift the window left by one step and append the new observation."""
        if self.history is None:
            self.initialize(processed_obs)
        # Re-seed freshly reset environments: fill their whole window with the
        # first observation seen after the reset.
        for env_idx in list(self.needs_init):
            self.history[env_idx, :] = processed_obs["obs"][env_idx]
            self.needs_init.remove(env_idx)
        # Roll the window and write the newest observation at the end.
        self.history[:, :-1] = self.history[:, 1:].clone()
        self.history[:, -1] = processed_obs["obs"]

    def get_batch(self, env_indices: list[int]) -> dict[str, torch.Tensor]:
        """Return the ``(batch, n_obs_steps, obs_dim)`` window for ``env_indices``."""
        if self.history is None:
            return {"obs": torch.zeros((len(env_indices), self.n_obs_steps, 0), device=self.device)}
        return {"obs": self.history[env_indices]}

    def reset_envs(self, env_indices: list[int]):
        """Mark the given environments for re-initialization on the next update."""
        self.needs_init.update(env_indices)


class ImageObservationHistory(ObservationHistoryManager):
    """Observation history for image-based policies (dict of tensors per key)."""

    def __init__(self, num_envs: int, n_obs_steps: int, device: torch.device):
        super().__init__(num_envs, n_obs_steps, device)
        self.obs_keys = None

    def initialize(self, processed_obs: dict[str, torch.Tensor]):
        """Allocate one ``(num_envs, n_obs_steps, ...)`` buffer per observation key."""
        self.obs_keys = list(processed_obs.keys())
        self.history = {}
        for key in self.obs_keys:
            history_shape = (self.num_envs, self.n_obs_steps) + processed_obs[key].shape[1:]
            self.history[key] = torch.zeros(history_shape, device=self.device, dtype=processed_obs[key].dtype)

    def update(self, processed_obs: dict[str, torch.Tensor]):
        """Shift the window left by one step and append the new observations."""
        if self.history is None:
            self.initialize(processed_obs)
        # Re-seed freshly reset environments for every key.
        if self.needs_init and self.obs_keys is not None:
            for env_idx in list(self.needs_init):
                if env_idx < self.num_envs:
                    for key in self.obs_keys:
                        self.history[key][env_idx, :] = processed_obs[key][env_idx]
                self.needs_init.remove(env_idx)
        # Roll each key's window and write the newest observation at the end.
        if self.obs_keys is not None:
            for key in self.obs_keys:
                self.history[key][:, :-1] = self.history[key][:, 1:].clone()
                self.history[key][:, -1] = processed_obs[key]

    def get_batch(self, env_indices: list[int]) -> dict[str, torch.Tensor]:
        """Return per-key ``(batch, n_obs_steps, ...)`` windows for ``env_indices``."""
        if self.history is None or self.obs_keys is None:
            return {}
        # Plain indexing keeps the stored (batch, time, ...) layout; no transpose.
        return {key: self.history[key][env_indices] for key in self.obs_keys}

    def reset_envs(self, env_indices: list[int]):
        """Mark the given environments for re-initialization on the next update."""
        self.needs_init.update(env_indices)


class DiffusionPolicyWrapper:
    """Wraps a diffusion policy for Isaac Lab observations and action execution."""

    def __init__(self, policy, device: torch.device, n_obs_steps: int = 2, num_envs: int = 1):
        """Initialize the policy wrapper.

        Args:
            policy: The diffusion policy to wrap.
            device: Device to run the policy on.
            n_obs_steps: Number of observation steps to maintain in history.
            num_envs: Number of environments to handle.
        """
        self.policy = policy
        self.device = device
        self.n_obs_steps = n_obs_steps
        self.num_envs = num_envs

        # Pick the history layout matching the policy's observation modality.
        self.is_image_policy = self._is_image_policy()
        if self.is_image_policy:
            self.obs_history_manager = ImageObservationHistory(num_envs, n_obs_steps, device)
        else:
            self.obs_history_manager = LowDimObservationHistory(num_envs, n_obs_steps, device)

        # One FIFO of pending actions per environment.
        self.action_queue = [[] for _ in range(num_envs)]

        # Reset the policy to initialize its internal queues.
        self.policy.reset()

    def _is_image_policy(self) -> bool:
        """Heuristically detect image policies from the policy class name."""
        policy_class_name = self.policy.__class__.__name__.lower()
        return any(tag in policy_class_name for tag in ("image", "hybrid", "video"))

    def reset(self, reset_ids: torch.Tensor):
        """Clear action queues and observation history for the given env ids."""
        reset_indices = reset_ids.tolist() if hasattr(reset_ids, "tolist") else reset_ids
        for i in reset_indices:
            self.action_queue[i].clear()
        # (The original re-checked isinstance(reset_indices, Tensor) here; that was
        # dead code since tolist() above already produced a plain list.)
        self.obs_history_manager.reset_envs(reset_indices)
        self.policy.reset()

    def predict_action(self, obs_dict: dict[str, Any]) -> torch.Tensor:
        """Predict one action per environment from Isaac Lab observations.

        Args:
            obs_dict: Raw observations from the Isaac Lab environment.

        Returns:
            Action tensor of shape ``(num_envs, action_dim)``.
        """
        processed_obs = self._process_obs(obs_dict)
        self.obs_history_manager.update(processed_obs)

        # Replan only for environments whose queue ran dry.
        need_new_actions = [i for i in range(self.num_envs) if not self.action_queue[i]]
        if need_new_actions:
            new_actions = self._get_action_chunks(need_new_actions)
            for idx, env_idx in enumerate(need_new_actions):
                self.action_queue[env_idx].extend(new_actions[idx])

        # Pop the next queued action for every environment.
        action_dim = self.action_queue[0][0].shape[-1]
        actions = torch.zeros(self.num_envs, action_dim, device=self.device, dtype=torch.float32)
        for i in range(self.num_envs):
            actions[i] = self.action_queue[i].pop(0)
        return actions

    def _process_obs(self, obs_dict: dict[str, Any]) -> dict[str, torch.Tensor]:
        """Convert raw Isaac Lab observations into the policy's expected format."""
        # Prefer the "policy" observation group when present.
        obs = obs_dict.get("policy", obs_dict) if isinstance(obs_dict, dict) else obs_dict
        if self.is_image_policy:
            return self._process_image_obs(obs)
        return self._process_lowdim_obs(obs)

    def _process_image_obs(self, obs: dict[str, Any]) -> dict[str, torch.Tensor]:
        """Move each observation entry to the target device as a tensor."""
        processed_obs = {}
        for key, value in obs.items():
            if isinstance(value, torch.Tensor):
                processed_obs[key] = value.to(self.device)
            else:
                processed_obs[key] = torch.tensor(value, device=self.device)
        return processed_obs

    def _process_lowdim_obs(self, obs: dict[str, Any]) -> dict[str, torch.Tensor]:
        """Concatenate all observation entries (sorted by key) into one vector."""
        obs_components = []
        for key in sorted(obs.keys()):
            value = obs[key]
            if isinstance(value, torch.Tensor):
                obs_components.append(value.to(self.device))
            else:
                obs_components.append(torch.tensor(value, device=self.device))

        if obs_components:
            obs_tensor = torch.cat(obs_components, dim=-1)
            # Guarantee a (num_envs, features) shape even for a single flat vector.
            if obs_tensor.ndim == 1:
                obs_tensor = obs_tensor.unsqueeze(0)
            return {"obs": obs_tensor}
        return {"obs": torch.zeros((self.num_envs, 0), device=self.device)}

    def _get_action_chunks(self, env_indices: list[int]) -> list[torch.Tensor]:
        """Query the policy for a new action chunk for each requested environment.

        Args:
            env_indices: Environments that need new action chunks.

        Returns:
            One ``(chunk_len, action_dim)`` tensor per requested environment.
        """
        obs_batch = self.obs_history_manager.get_batch(env_indices)

        result = self.policy.predict_action(obs_batch)
        # Policies may return either a dict with an "action" entry or a raw tensor.
        action_chunk = result["action"] if isinstance(result, dict) else result

        if action_chunk.ndim == 3:
            # (batch, chunk_len, action_dim) -> one (chunk_len, action_dim) per env.
            return [action_chunk[i] for i in range(action_chunk.shape[0])]
        # Single-step case: (batch, action_dim) -> one (1, action_dim) per env.
        return [action_chunk[i].unsqueeze(0) for i in range(action_chunk.shape[0])]
and py in wheel_by_py: + INSTALL_REQUIRES.append(f"pytorch3d @ {wheel_by_py[py]}") is_linux_x86_64 = platform.system() == "Linux" and platform.machine() in ("x86_64", "AMD64") py = f"cp{sys.version_info.major}{sys.version_info.minor}" diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/__init__.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/__init__.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/assembly_keypoints.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/assembly_keypoints.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/assembly_keypoints.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/assembly_keypoints.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/__init__.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/__init__.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/__init__.py similarity index 59% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/__init__.py index 1e261d82..1e240e13 100644 --- 
a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/__init__.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/__init__.py @@ -61,43 +61,104 @@ kwargs={"env_cfg_entry_point": f"{__name__}.reset_states_cfg:ObjectPartiallyAssembledEEGraspedResetStatesCfg"}, ) +# Register SysID env +gym.register( + id="OmniReset-Ur5eRobotiq2f85-Sysid-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={"env_cfg_entry_point": f"{__name__}.sysid_cfg:SysidEnvCfg"}, +) + +# Register Camera Alignment env +gym.register( + id="OmniReset-Ur5eRobotiq2f85-CameraAlign-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={"env_cfg_entry_point": f"{__name__}.camera_align_cfg:CameraAlignEnvCfg"}, +) + # Register RL state environments gym.register( - id="OmniReset-Ur5eRobotiq2f85-RelJointPos-State-v0", + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelJointPosTrainCfg", + "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCTrainCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_PPORunnerCfg", }, ) gym.register( - id="OmniReset-Ur5eRobotiq2f85-RelJointPos-State-Play-v0", + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelJointPosEvalCfg", + "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCFinetuneCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_PPORunnerCfg", }, ) gym.register( - id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-v0", + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0", 
entry_point="isaaclab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCTrainCfg", + "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCEvalCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_PPORunnerCfg", }, ) gym.register( - id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Play-v0", + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-State-Finetune-Play-v0", entry_point="isaaclab.envs:ManagerBasedRLEnv", disable_env_checker=True, kwargs={ - "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCEvalCfg", + "env_cfg_entry_point": f"{__name__}.rl_state_cfg:Ur5eRobotiq2f85RelCartesianOSCFinetuneEvalCfg", "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_PPORunnerCfg", }, ) + + +# RGB environments for data collection and evaluation +gym.register( + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-DataCollection-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.data_collection_rgb_cfg:Ur5eRobotiq2f85DataCollectionRGBRelCartesianOSCCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_DAggerRunnerCfg", + }, +) + +gym.register( + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.data_collection_rgb_cfg:Ur5eRobotiq2f85EvalRGBRelCartesianOSCCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_DAggerRunnerCfg", + }, +) + +# OOD (out-of-distribution) RGB environments +gym.register( + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-OOD-DataCollection-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": ( + 
f"{__name__}.data_collection_rgb_cfg:Ur5eRobotiq2f85DataCollectionRGBRelCartesianOSCOODCfg" + ), + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_DAggerRunnerCfg", + }, +) + +gym.register( + id="OmniReset-Ur5eRobotiq2f85-RelCartesianOSC-RGB-OOD-Play-v0", + entry_point="isaaclab.envs:ManagerBasedRLEnv", + disable_env_checker=True, + kwargs={ + "env_cfg_entry_point": f"{__name__}.data_collection_rgb_cfg:Ur5eRobotiq2f85EvalRGBRelCartesianOSCOODCfg", + "rsl_rl_cfg_entry_point": f"{agents.__name__}.rsl_rl_cfg:Base_DAggerRunnerCfg", + }, +) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/actions.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/actions.py new file mode 100644 index 00000000..5c3907cb --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/actions.py @@ -0,0 +1,69 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
#
# SPDX-License-Identifier: BSD-3-Clause

"""Arm + gripper action configurations for the UR5e / Robotiq 2F-85 OmniReset tasks."""

from __future__ import annotations

from isaaclab.utils import configclass

from uwlab_assets.robots.ur5e_robotiq_gripper.actions import ROBOTIQ_GRIPPER_BINARY_ACTIONS

from ...mdp.actions.actions_cfg import RelCartesianOSCActionCfg


def _make_arm_cfg(scale_xyz_axisangle, motion_stiffness, motion_damping_ratio) -> RelCartesianOSCActionCfg:
    """Build an arm OSC action config with the shared UR5e wiring.

    A fresh config (with fresh containers) is returned each call so the three
    variants below never alias each other's mutable fields.
    """
    return RelCartesianOSCActionCfg(
        asset_name="robot",
        joint_names=["shoulder.*", "elbow.*", "wrist.*"],
        body_name="wrist_3_link",
        scale_xyz_axisangle=scale_xyz_axisangle,
        motion_stiffness=motion_stiffness,
        motion_damping_ratio=motion_damping_ratio,
        torque_limit=(150.0, 150.0, 150.0, 28.0, 28.0, 28.0),
    )


# Pre-train gains (soft initial Kp; curriculum ramps to stiff terminal)
UR5E_ROBOTIQ_2F85_RELATIVE_OSC = _make_arm_cfg(
    scale_xyz_axisangle=(0.02, 0.02, 0.02, 0.02, 0.02, 0.2),
    motion_stiffness=(200.0, 200.0, 200.0, 3.0, 3.0, 3.0),
    motion_damping_ratio=(3.0, 3.0, 3.0, 1.0, 1.0, 1.0),
)

# Eval / sim2real gains (high Kp matched to sysid friction, end-of-curriculum values).
# NOTE(review): the z translation scale (0.002) is 5x smaller than x/y (0.01) --
# presumably intentional for careful insertion; confirm.
UR5E_ROBOTIQ_2F85_RELATIVE_OSC_EVAL = _make_arm_cfg(
    scale_xyz_axisangle=(0.01, 0.01, 0.002, 0.02, 0.02, 0.2),
    motion_stiffness=(1000.0, 1000.0, 1000.0, 50.0, 50.0, 50.0),
    motion_damping_ratio=(1.0, 1.0, 1.0, 1.0, 1.0, 1.0),
)

# Unscaled (for sysid scripts)
UR5E_ROBOTIQ_2F85_RELATIVE_OSC_UNSCALED = _make_arm_cfg(
    scale_xyz_axisangle=(1.0, 1.0, 1.0, 1.0, 1.0, 1.0),
    motion_stiffness=(1000.0, 1000.0, 1000.0, 50.0, 50.0, 50.0),
    motion_damping_ratio=(1.0, 1.0, 1.0, 1.0, 1.0, 1.0),
)


@configclass
class Ur5eRobotiq2f85RelativeOSCAction:
    """Action config using the analytical OSC + binary gripper."""

    arm = UR5E_ROBOTIQ_2F85_RELATIVE_OSC
    gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS


@configclass
class Ur5eRobotiq2f85RelativeOSCEvalAction:
    """Action config with high Kp gains (end-of-curriculum values) for eval / data-collection."""

    arm = UR5E_ROBOTIQ_2F85_RELATIVE_OSC_EVAL
    gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS


@configclass
class Ur5eRobotiq2f85SysidOSCAction:
    """Unscaled arm action (Cartesian delta) + binary gripper. For Sysid env / scripts."""

    arm = UR5E_ROBOTIQ_2F85_RELATIVE_OSC_UNSCALED
    gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS
#
# SPDX-License-Identifier: BSD-3-Clause

"""RSL-RL runner configurations for the UR5e + Robotiq 2F-85 OmniReset tasks."""

from isaaclab.utils import configclass
from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoAlgorithmCfg

from uwlab_rl.rsl_rl.rl_cfg import (
    BehaviorCloningCfg,
    OffPolicyAlgorithmCfg,
    RslRlFancyActorCriticCfg,
    RslRlFancyPpoAlgorithmCfg,
)

# Dotted path of this task's config package; shared by the expert group entries below.
_CFG_PKG = "uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85"


def my_experts_observation_func(env):
    """Return the expert observation group from the unwrapped environment."""
    return env.unwrapped.obs_buf["expert_obs"]


@configclass
class Base_PPORunnerCfg(RslRlOnPolicyRunnerCfg):
    """PPO runner for training the state-based OmniReset agent."""

    num_steps_per_env = 32
    max_iterations = 40000
    save_interval = 100
    resume = False
    experiment_name = "ur5e_robotiq_2f85_omnireset_agent"
    # gSDE-style exploration noise on a fairly deep MLP actor-critic.
    policy = RslRlFancyActorCriticCfg(
        init_noise_std=1.0,
        actor_obs_normalization=True,
        critic_obs_normalization=True,
        actor_hidden_dims=[512, 256, 128, 64],
        critic_hidden_dims=[512, 256, 128, 64],
        activation="elu",
        noise_std_type="gsde",
        state_dependent_std=False,
    )
    algorithm = RslRlPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        normalize_advantage_per_mini_batch=False,
        clip_param=0.2,
        entropy_coef=0.006,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-4,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
    )


@configclass
class Base_DAggerRunnerCfg(Base_PPORunnerCfg):
    """DAgger-style runner: PPO plus a behavior-cloning term against a frozen expert."""

    # Same PPO hyperparameters as the base runner, extended with an offline BC term.
    algorithm = RslRlFancyPpoAlgorithmCfg(
        value_loss_coef=1.0,
        use_clipped_value_loss=True,
        normalize_advantage_per_mini_batch=False,
        clip_param=0.2,
        entropy_coef=0.006,
        num_learning_epochs=5,
        num_mini_batches=4,
        learning_rate=1.0e-4,
        schedule="adaptive",
        gamma=0.99,
        lam=0.95,
        desired_kl=0.01,
        max_grad_norm=1.0,
        offline_algorithm_cfg=OffPolicyAlgorithmCfg(
            behavior_cloning_cfg=BehaviorCloningCfg(
                # TODO: fill in expert checkpoint path(s) before use.
                experts_path=[""],
                experts_loader="torch.jit.load",
                experts_observation_group_cfg=f"{_CFG_PKG}.rl_state_cfg:ObservationsCfg.PolicyCfg",
                experts_observation_func=my_experts_observation_func,
                experts_action_group_cfg=f"{_CFG_PKG}.actions:Ur5eRobotiq2f85RelativeOSCAction",
                cloning_loss_coeff=1.0,
                loss_decay=1.0,
            )
        ),
    )
+ +Mirrors the sysid pattern: + sysid_cfg.py + scripts_v2/tools/sim2real/sysid_ur5e_osc.py + camera_align_cfg.py + scripts_v2/tools/sim2real/align_cameras.py +""" + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.assets import RigidObjectCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import SceneEntityCfg +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.sensors import TiledCameraCfg +from isaaclab.utils import configclass + +from uwlab_assets.robots.ur5e_robotiq_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85 + +from ... import mdp as task_mdp +from .actions import Ur5eRobotiq2f85SysidOSCAction +from .rl_state_cfg import RlStateSceneCfg + +# Same sim dt as sysid / finetune (500 Hz) +CAMERA_ALIGN_SIM_DT = 1.0 / 500.0 + + +@configclass +class CameraAlignSceneCfg(RlStateSceneCfg): + """Scene for camera alignment. + + Inherits from RlStateSceneCfg (robot, table, ur5_metal_support, ground, + sky_light, insertive/receptive objects) and adds curtains + cameras. + Same structure as DataCollectionRGBObjectSceneCfg but with NO randomization. 
+ """ + + # Use explicit (sysid-tuned) actuator model + robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") + + # --- Background curtains (match real workspace) --- + curtain_left = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/CurtainLeft", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, -0.68, 0.519), rot=(0.707, 0.0, 0.0, -0.707)), + spawn=sim_utils.CuboidCfg( + size=(0.01, 1.0, 1.125), + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)), + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=False), + ), + ) + curtain_back = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/CurtainBack", + init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.15, 0.0, 0.519), rot=(1.0, 0.0, 0.0, 0.0)), + spawn=sim_utils.CuboidCfg( + size=(0.01, 1.3, 1.125), + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)), + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=False), + ), + ) + curtain_right = RigidObjectCfg( + prim_path="{ENV_REGEX_NS}/CurtainRight", + init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.68, 0.519), rot=(0.707, 0.0, 0.0, -0.707)), + spawn=sim_utils.CuboidCfg( + size=(0.01, 1.0, 1.125), + rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True), + visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(1.0, 1.0, 1.0)), + collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=False), + ), + ) + + # --- Cameras (initial poses from data_collection_rgb_cfg) --- + front_camera = TiledCameraCfg( + prim_path="{ENV_REGEX_NS}/Robot/rgb_front_camera", + update_period=0, + height=480, + width=640, + offset=TiledCameraCfg.OffsetCfg( + pos=(1.0770121, -0.1679045, 0.4486344), + rot=(0.70564552, 0.46613815, 0.25072644, 0.47107948), + convention="opengl", + ), + data_types=["rgb"], + 
spawn=sim_utils.PinholeCameraCfg(focal_length=13.20), + ) + + side_camera = TiledCameraCfg( + prim_path="{ENV_REGEX_NS}/Robot/rgb_side_camera", + update_period=0, + height=480, + width=640, + offset=TiledCameraCfg.OffsetCfg( + pos=(0.8323904, 0.5877843, 0.2805111), + rot=(0.29008842, 0.22122445, 0.51336143, 0.77676798), + convention="opengl", + ), + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg(focal_length=20.10), + ) + + wrist_camera = TiledCameraCfg( + prim_path="{ENV_REGEX_NS}/Robot/robotiq_base_link/rgb_wrist_camera", + update_period=0, + height=480, + width=640, + offset=TiledCameraCfg.OffsetCfg( + pos=(0.0182505, -0.00408447, -0.0689107), + rot=(0.34254336, -0.61819255, -0.6160212, 0.347879), + convention="opengl", + ), + data_types=["rgb"], + spawn=sim_utils.PinholeCameraCfg(focal_length=24.55), + ) + + +# --------------------------------------------------------------------------- +# Minimal MDP (camera alignment only needs RGB obs + joint_pos action) +# --------------------------------------------------------------------------- +@configclass +class CameraAlignObservationsCfg: + @configclass + class PolicyCfg(ObsGroup): + joint_pos = ObsTerm(func=task_mdp.joint_pos) + + front_rgb = ObsTerm( + func=task_mdp.process_image, + params={ + "sensor_cfg": SceneEntityCfg("front_camera"), + "data_type": "rgb", + "process_image": False, + "output_size": (240, 320), + }, + ) + side_rgb = ObsTerm( + func=task_mdp.process_image, + params={ + "sensor_cfg": SceneEntityCfg("side_camera"), + "data_type": "rgb", + "process_image": False, + "output_size": (240, 320), + }, + ) + wrist_rgb = ObsTerm( + func=task_mdp.process_image, + params={ + "sensor_cfg": SceneEntityCfg("wrist_camera"), + "data_type": "rgb", + "process_image": False, + "output_size": (240, 320), + }, + ) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = False + + policy: PolicyCfg = PolicyCfg() + + +@configclass +class CameraAlignRewardsCfg: + pass + + 

@configclass
class CameraAlignTerminationsCfg:
    # Episodes end only on timeout; episode_length_s is made effectively
    # infinite in CameraAlignEnvCfg.__post_init__.
    time_out = DoneTerm(func=task_mdp.time_out, time_out=True)


@configclass
class CameraAlignEnvCfg(ManagerBasedRLEnvCfg):
    """Env for interactive sim2real camera alignment.

    Uses the same robot/action as sysid so the robot can be positioned
    at arbitrary joint angles. Only 1 env needed (interactive tool).
    """

    scene: CameraAlignSceneCfg = CameraAlignSceneCfg(num_envs=1, env_spacing=2.0)
    actions: Ur5eRobotiq2f85SysidOSCAction = Ur5eRobotiq2f85SysidOSCAction()
    observations: CameraAlignObservationsCfg = CameraAlignObservationsCfg()
    rewards: CameraAlignRewardsCfg = CameraAlignRewardsCfg()
    terminations: CameraAlignTerminationsCfg = CameraAlignTerminationsCfg()

    def __post_init__(self) -> None:
        # Step physics at the sysid rate (500 Hz, CAMERA_ALIGN_SIM_DT) with no decimation.
        self.decimation = 1
        self.episode_length_s = 99999.0
        self.sim.dt = CAMERA_ALIGN_SIM_DT

        # Place robot at average real-world position (reset_states_cfg y avg = -0.039).
        self.scene.robot.init_state.pos = (0.0, -0.039, 0.0)
        self.scene.ur5_metal_support.init_state.pos = (0.0, -0.039, -0.013)

        # Render settings for visual fidelity
        self.sim.render.enable_ambient_occlusion = True
        self.sim.render.enable_reflections = True
        self.sim.render.enable_dl_denoiser = True
        self.sim.render_interval = 1

        # rerender on reset
        self.num_rerenders_on_reset = 1
diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/data_collection_rgb_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/data_collection_rgb_cfg.py
new file mode 100644
index 00000000..a5e06b11
--- /dev/null
+++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/data_collection_rgb_cfg.py
@@ -0,0 +1,833 @@
# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md).
# All Rights Reserved.
#
# SPDX-License-Identifier: BSD-3-Clause


from __future__ import annotations

from pathlib import Path

import isaaclab.sim as sim_utils
from isaaclab.assets import RigidObjectCfg
from isaaclab.managers import EventTermCfg as EventTerm
from isaaclab.managers import ObservationGroupCfg as ObsGroup
from isaaclab.managers import ObservationTermCfg as ObsTerm
from isaaclab.managers import SceneEntityCfg
from isaaclab.managers import TerminationTermCfg as DoneTerm
from isaaclab.sensors import TiledCameraCfg
from isaaclab.utils import configclass

from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR

from ... import mdp as task_mdp
from .actions import Ur5eRobotiq2f85RelativeOSCEvalAction
from .rl_state_cfg import FinetuneEvalEventCfg, RlStateSceneCfg, Ur5eRobotiq2f85RlStateCfg


@configclass
class DataCollectionRGBObjectSceneCfg(RlStateSceneCfg):
    """RGB data-collection scene: the RlStateSceneCfg scene plus black backdrop
    curtains and three tiled RGB cameras (front, side, wrist) at 320x240."""

    # background: kinematic, collision-free black cuboids framing the workspace
    curtain_left = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/CurtainLeft",
        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, -0.68, 0.519), rot=(0.707, 0.0, 0.0, -0.707)),
        spawn=sim_utils.CuboidCfg(
            size=(0.01, 1.0, 1.125),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 0.0)),
            collision_props=sim_utils.CollisionPropertiesCfg(
                collision_enabled=False,
            ),
        ),
    )

    curtain_back = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/CurtainBack",
        init_state=RigidObjectCfg.InitialStateCfg(pos=(-0.15, 0.0, 0.519), rot=(1.0, 0.0, 0.0, 0.0)),
        spawn=sim_utils.CuboidCfg(
            size=(0.01, 1.3, 1.125),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 0.0)),
            collision_props=sim_utils.CollisionPropertiesCfg(
                collision_enabled=False,
            ),
        ),
    )

    curtain_right = RigidObjectCfg(
        prim_path="{ENV_REGEX_NS}/CurtainRight",
        init_state=RigidObjectCfg.InitialStateCfg(pos=(0.4, 0.68, 0.519), rot=(0.707, 0.0, 0.0, -0.707)),
        spawn=sim_utils.CuboidCfg(
            size=(0.01, 1.0, 1.125),
            rigid_props=sim_utils.RigidBodyPropertiesCfg(kinematic_enabled=True),
            visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.0, 0.0, 0.0)),
            collision_props=sim_utils.CollisionPropertiesCfg(
                collision_enabled=False,
            ),
        ),
    )

    front_camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/rgb_front_camera",
        update_period=0,
        height=240,
        width=320,
        offset=TiledCameraCfg.OffsetCfg(
            pos=(1.0770121, -0.1679045, 0.4486344),
            rot=(0.70564552, 0.46613815, 0.25072644, 0.47107948),
            convention="opengl",
        ),
        data_types=["rgb"],
        spawn=sim_utils.PinholeCameraCfg(focal_length=13.20),
    )

    side_camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/rgb_side_camera",
        update_period=0,
        height=240,
        width=320,
        offset=TiledCameraCfg.OffsetCfg(
            pos=(0.8323904, 0.5877843, 0.2805111),
            rot=(0.29008842, 0.22122445, 0.51336143, 0.77676798),
            convention="opengl",
        ),
        data_types=["rgb"],
        spawn=sim_utils.PinholeCameraCfg(focal_length=20.10),
    )

    # Attached under robotiq_base_link, so this camera moves with the gripper.
    wrist_camera = TiledCameraCfg(
        prim_path="{ENV_REGEX_NS}/Robot/robotiq_base_link/rgb_wrist_camera",
        update_period=0,
        height=240,
        width=320,
        offset=TiledCameraCfg.OffsetCfg(
            pos=(0.0182505, -0.00408447, -0.0689107),
            rot=(0.34254336, -0.61819255, -0.6160212, 0.347879),
            convention="opengl",
        ),
        data_types=["rgb"],
        spawn=sim_utils.PinholeCameraCfg(focal_length=24.55),
    )


@configclass
class BaseRGBEventCfg(FinetuneEvalEventCfg):
    """RGB events: inherits fixed sysid + OSC gains from FinetuneEvalEventCfg, adds camera randomization."""

    # randomize camera pose around the nominal calibrated extrinsics
    randomize_front_camera = EventTerm(
        func=task_mdp.randomize_tiled_cameras,
        mode="reset",
        params={
            "camera_path_template": "/World/envs/env_{}/Robot/rgb_front_camera",
            # Base values from TiledCameraCfg
            "base_position": (1.0770121, -0.1679045, 0.4486344),
            "base_rotation": (0.70564552, 0.46613815, 0.25072644, 0.47107948),
            # Delta ranges for position (in meters)
            "position_deltas": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "z": (-0.05, 0.05)},
            # Delta ranges for euler angles (in degrees)
            "euler_deltas": {"pitch": (-2.0, 2.0), "yaw": (-2.0, 2.0), "roll": (-2.0, 2.0)},
        },
    )

    # Focal-length range brackets the nominal 13.20 mm of the front camera.
    randomize_front_camera_focal_length = EventTerm(
        func=task_mdp.randomize_camera_focal_length,
        mode="reset",
        params={
            "camera_path_template": "/World/envs/env_{}/Robot/rgb_front_camera",
            "focal_length_range": (11.2, 15.2),
        },
    )

    randomize_side_camera = EventTerm(
        func=task_mdp.randomize_tiled_cameras,
        mode="reset",
        params={
            "camera_path_template": "/World/envs/env_{}/Robot/rgb_side_camera",
            # Base values from TiledCameraCfg
            "base_position": (0.8323904, 0.5877843, 0.2805111),
            "base_rotation": (0.29008842, 0.22122445, 0.51336143, 0.77676798),
            # Delta ranges for position (in meters)
            "position_deltas": {"x": (-0.05, 0.05), "y": (-0.05, 0.05), "z": (-0.05, 0.05)},
            # Delta ranges for euler angles (in degrees)
            "euler_deltas": {"pitch": (-2.0, 2.0), "yaw": (-2.0, 2.0), "roll": (-2.0, 2.0)},
        },
    )

    randomize_side_camera_focal_length = EventTerm(
        func=task_mdp.randomize_camera_focal_length,
        mode="reset",
        params={"camera_path_template": "/World/envs/env_{}/Robot/rgb_side_camera", "focal_length_range": (18.1, 22.1)},
    )

    # Wrist camera uses tighter deltas (+/-1 cm, +/-1 deg) than the static cameras.
    randomize_wrist_camera = EventTerm(
        func=task_mdp.randomize_tiled_cameras,
        mode="reset",
        params={
            "camera_path_template": "/World/envs/env_{}/Robot/robotiq_base_link/rgb_wrist_camera",
            # Base values from TiledCameraCfg
            "base_position": (0.0182505, -0.00408447, -0.0689107),
            "base_rotation": (0.34254336, -0.61819255, -0.6160212, 0.347879),
            # Delta ranges for position (in meters)
            "position_deltas": {"x": (-0.01, 0.01), "y": (-0.01, 0.01), "z": (-0.01, 0.01)},
            # Delta ranges for euler angles (in degrees)
            "euler_deltas": {"pitch": (-1.0, 1.0), "yaw": (-1.0, 1.0), "roll": (-1.0, 1.0)},
        },
    )

    randomize_wrist_camera_focal_length = EventTerm(
        func=task_mdp.randomize_camera_focal_length,
        mode="reset",
        params={
            "camera_path_template": "/World/envs/env_{}/Robot/robotiq_base_link/rgb_wrist_camera",
            "focal_length_range": (23.55, 25.55),  # +/-1 mm around the nominal 24.55 mm
        },
    )


@configclass
class RGBEventCfg(BaseRGBEventCfg):
    """Configuration for randomization: camera events from BaseRGBEventCfg plus
    interval-based visual appearance / HDRI randomization and dataset-based resets."""

    randomize_wrist_mount_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "event_name": "randomize_wrist_mount_event",
            "mesh_names": ["robotiq_base_link/visuals/D415_to_Robotiq_Mount"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.2, 1.0),
            "metallic_range": (0.0, 0.8),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_inner_finger_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "event_name": "randomize_inner_finger_event",
            "mesh_names": ["left_inner_finger/visuals/mesh_1", "right_inner_finger/visuals/mesh_1"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.2, 1.0),
            "metallic_range": (0.0, 0.8),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_insertive_object_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("insertive_object"),
            "event_name": "randomize_insertive_object_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.0, 1.0),
            "metallic_range": (0.0, 1.0),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_receptive_object_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("receptive_object"),
            "event_name": "randomize_receptive_object_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.0, 1.0),
            "metallic_range": (0.0, 1.0),
            "specular_range": (0.0, 1.0),
        },
    )

    # Table gets narrower roughness/metallic ranges than the objects.
    randomize_table_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("table"),
            "event_name": "randomize_table_event",
            "mesh_names": ["visuals/vention_mat"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.3, 0.9),
            "metallic_range": (0.0, 0.3),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_curtain_left_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_left"),
            "event_name": "randomize_curtain_left_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.0, 1.0),
            "metallic_range": (0.0, 1.0),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_curtain_back_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_back"),
            "event_name": "randomize_curtain_back_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.0, 1.0),
            "metallic_range": (0.0, 1.0),
            "specular_range": (0.0, 1.0),
        },
    )

    randomize_curtain_right_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_right"),
            "event_name": "randomize_curtain_right_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
            "texture_scale_range": (0.7, 5.0),
            "roughness_range": (0.0, 1.0),
            "metallic_range": (0.0, 1.0),
            "specular_range": (0.0, 1.0),
        },
    )

    # reset background lighting (HDRI dome) at the same 4 s interval
    randomize_sky_light = EventTerm(
        func=task_mdp.randomize_hdri,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "light_path": "/World/skyLight",
            "hdri_config_path": str(Path(__file__).parent / "resources" / "hdri_paths.yaml"),
            "intensity_range": (1000.0, 4000.0),
            "rotation_range": (0.0, 360.0),
        },
    )

    # Evaluation-style reset: only the ObjectAnywhereEEAnywhere distribution.
    reset_from_reset_states = EventTerm(
        func=task_mdp.MultiResetManager,
        mode="reset",
        params={
            "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset",
            "reset_types": ["ObjectAnywhereEEAnywhere"],
            "probs": [1.0],
            "success": "env.reward_manager.get_term_cfg('progress_context').func.success",
        },
    )


@configclass
class DataCollectionRGBEventCfg(RGBEventCfg):
    """Data collection events: override reset to sample from all 4 distributions."""

    reset_from_reset_states = EventTerm(
        func=task_mdp.MultiResetManager,
        mode="reset",
        params={
            "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset",
            "reset_types": [
                "ObjectAnywhereEEAnywhere",
                "ObjectRestingEEGrasped",
                "ObjectAnywhereEEGrasped",
                "ObjectPartiallyAssembledEEGrasped",
            ],
            "probs": [0.25, 0.25, 0.25, 0.25],
            "success": "env.reward_manager.get_term_cfg('progress_context').func.success",
        },
    )


@configclass
class RGBCommandsCfg:
    """Command specifications for the MDP."""

    # Resampling range of 1e6 s effectively means the command is sampled once.
    task_command = task_mdp.TaskCommandCfg(
        asset_cfg=SceneEntityCfg("robot", body_names="body"),
        resampling_time_range=(1e6, 1e6),
        insertive_asset_cfg=SceneEntityCfg("insertive_object"),
        receptive_asset_cfg=SceneEntityCfg("receptive_object"),
    )


@configclass
class RGBObservationsCfg:
    """Observation groups: a policy group (processed images) and a
    data-collection group (raw images plus extra state terms)."""

    @configclass
    class RGBPolicyCfg(ObsGroup):
        """Observations for policy group (with processed images for evaluation)."""

        last_gripper_action = ObsTerm(
            func=task_mdp.last_action,
            params={
                "action_name": "gripper",
            },
        )

        last_arm_action = ObsTerm(
            func=task_mdp.last_action,
            params={
                "action_name": "arm",
            },
        )

        arm_joint_pos = ObsTerm(
            func=task_mdp.joint_pos,
            params={
                "asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"]),
            },
        )

        end_effector_pose = ObsTerm(
            func=task_mdp.target_asset_pose_in_root_asset_frame,
            params={
                "target_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"),
                "root_asset_cfg": SceneEntityCfg("robot"),
                "rotation_repr": "axis_angle",
            },
        )

        front_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("front_camera"),
                "data_type": "rgb",
                "process_image": True,
                "output_size": (224, 224),
            },
        )

        side_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("side_camera"),
                "data_type": "rgb",
                "process_image": True,
                "output_size": (224, 224),
            },
        )

        wrist_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("wrist_camera"),
                "data_type": "rgb",
                "process_image": True,
                "output_size": (224, 224),
            },
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = False

    @configclass
    class RGBDataCollectionCfg(ObsGroup):
        """Observations for data collection group (with unprocessed images for saving)."""

        last_gripper_action = ObsTerm(
            func=task_mdp.last_action,
            params={
                "action_name": "gripper",
            },
        )

        last_arm_action = ObsTerm(
            func=task_mdp.last_action,
            params={
                "action_name": "arm",
            },
        )

        arm_joint_pos = ObsTerm(
            func=task_mdp.joint_pos,
            params={
                "asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"]),
            },
        )

        end_effector_pose = ObsTerm(
            func=task_mdp.target_asset_pose_in_root_asset_frame,
            params={
                "target_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"),
                "root_asset_cfg": SceneEntityCfg("robot"),
                "rotation_repr": "axis_angle",
            },
        )

        front_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("front_camera"),
                "data_type": "rgb",
                # Don't process the image since we want to save it as int8
                "process_image": False,
                "output_size": (224, 224),
            },
        )

        side_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("side_camera"),
                "data_type": "rgb",
                # Don't process the image since we want to save it as int8
                "process_image": False,
                "output_size": (224, 224),
            },
        )

        wrist_rgb = ObsTerm(
            func=task_mdp.process_image,
            params={
                "sensor_cfg": SceneEntityCfg("wrist_camera"),
                "data_type": "rgb",
                # Don't process the image since we want to save it as int8
                "process_image": False,
                "output_size": (224, 224),
            },
        )

        # Additional observations (state ground truth stored alongside the images)
        binary_contact = ObsTerm(
            func=task_mdp.binary_force_contact,
            params={
                "asset_cfg": SceneEntityCfg("robot"),
                "body_name": "wrist_3_link",
                "force_threshold": 25.0,
            },
        )

        insertive_asset_pose = ObsTerm(
            func=task_mdp.target_asset_pose_in_root_asset_frame,
            params={
                "target_asset_cfg": SceneEntityCfg("insertive_object"),
                "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"),
                "rotation_repr": "axis_angle",
            },
        )

        receptive_asset_pose = ObsTerm(
            func=task_mdp.target_asset_pose_in_root_asset_frame,
            params={
                "target_asset_cfg": SceneEntityCfg("receptive_object"),
                "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"),
                "rotation_repr": "axis_angle",
            },
        )

        insertive_asset_in_receptive_asset_frame: ObsTerm = ObsTerm(
            func=task_mdp.target_asset_pose_in_root_asset_frame,
            params={
                "target_asset_cfg": SceneEntityCfg("insertive_object"),
                "root_asset_cfg": SceneEntityCfg("receptive_object"),
                "rotation_repr": "axis_angle",
            },
        )

        def __post_init__(self):
            self.enable_corruption = True
            self.concatenate_terms = False

    # observation groups
    policy: RGBPolicyCfg = RGBPolicyCfg()
    data_collection: RGBDataCollectionCfg = RGBDataCollectionCfg()


@configclass
class DataCollectionRGBTerminationsCfg:
    """Terminations for RGB data collection / evaluation."""

    time_out = DoneTerm(func=task_mdp.time_out, time_out=True)

    abnormal_robot = DoneTerm(func=task_mdp.abnormal_robot_state)

    # Drop episodes whose camera output degenerates (per-image std below threshold).
    corrupted_camera = DoneTerm(
        func=task_mdp.corrupted_camera_detected,
        params={"camera_names": ["front_camera", "side_camera", "wrist_camera"], "std_threshold": 10.0},
    )

    early_success = DoneTerm(
        func=task_mdp.early_success_termination, params={"num_consecutive_successes": 5, "min_episode_length": 10}
    )

    success = DoneTerm(
        func=task_mdp.consecutive_success_state_with_min_length,
        params={"num_consecutive_successes": 5, "min_episode_length": 10},
    )


@configclass
class Ur5eRobotiq2f85RGBRelCartesianOSCEvalCfg(Ur5eRobotiq2f85RlStateCfg):
    """RGB base config: fixed sysid + RGB scene/obs/terminations/render."""

    actions: Ur5eRobotiq2f85RelativeOSCEvalAction = Ur5eRobotiq2f85RelativeOSCEvalAction()
    scene: DataCollectionRGBObjectSceneCfg = DataCollectionRGBObjectSceneCfg(
        num_envs=32, env_spacing=1.5, replicate_physics=False
    )
    observations: RGBObservationsCfg = RGBObservationsCfg()
    terminations: DataCollectionRGBTerminationsCfg = DataCollectionRGBTerminationsCfg()
    commands: RGBCommandsCfg = RGBCommandsCfg()

    def __post_init__(self):
        super().__post_init__()

        self.episode_length_s = 32.0

        # Render settings
        self.sim.render.enable_dlssg = False
        self.sim.render.enable_ambient_occlusion = True
        self.sim.render.enable_reflections = True
        self.sim.render.enable_dl_denoiser = True
        self.sim.render.antialiasing_mode = "DLAA"

        # Render only once per control step (decimation) — speeds up rendering.
        self.sim.render_interval = self.decimation

        # rerender on reset
        self.num_rerenders_on_reset = 1


@configclass
class Ur5eRobotiq2f85DataCollectionRGBRelCartesianOSCCfg(Ur5eRobotiq2f85RGBRelCartesianOSCEvalCfg):
    """Data collection config: adds the 4-distribution reset events."""

    events: DataCollectionRGBEventCfg = DataCollectionRGBEventCfg()


@configclass
class Ur5eRobotiq2f85EvalRGBRelCartesianOSCCfg(Ur5eRobotiq2f85RGBRelCartesianOSCEvalCfg):
    """Evaluation config for Cartesian OSC delta actions."""

    events: RGBEventCfg = RGBEventCfg()


# OOD RGB Event Configs #

@configclass
class OODRGBEventCfg(BaseRGBEventCfg):
    """Configuration for randomization with OOD (out-of-distribution) textures and HDRIs.

    Mirrors RGBEventCfg but points every appearance event at the ``*_ood.yaml``
    resource lists; unlike RGBEventCfg, the OOD appearance events do not set
    texture scale / roughness / metallic / specular ranges.
    """

    # Override visual appearance randomization to use OOD textures
    randomize_wrist_mount_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "event_name": "randomize_wrist_mount_event",
            "mesh_names": ["robotiq_base_link/visuals/D415_to_Robotiq_Mount"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_inner_finger_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("robot"),
            "event_name": "randomize_inner_finger_event",
            "mesh_names": ["left_inner_finger/visuals/mesh_1", "right_inner_finger/visuals/mesh_1"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_insertive_object_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("insertive_object"),
            "event_name": "randomize_insertive_object_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_receptive_object_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("receptive_object"),
            "event_name": "randomize_receptive_object_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_table_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("table"),
            "event_name": "randomize_table_event",
            "mesh_names": ["visuals/vention_mat"],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_curtain_left_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_left"),
            "event_name": "randomize_curtain_left_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_curtain_back_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_back"),
            "event_name": "randomize_curtain_back_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    randomize_curtain_right_appearance = EventTerm(
        func=task_mdp.randomize_visual_appearance_multiple_meshes,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "asset_cfg": SceneEntityCfg("curtain_right"),
            "event_name": "randomize_curtain_right_event",
            "mesh_names": [],
            "texture_prob": 0.5,
            "texture_config_path": str(Path(__file__).parent / "resources" / "texture_paths_ood.yaml"),
            "diffuse_tint_range": ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0)),
            "colors": {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)},
        },
    )

    # Override HDRI randomization to use OOD HDRIs
    randomize_sky_light = EventTerm(
        func=task_mdp.randomize_hdri,
        mode="interval",
        interval_range_s=(4.0, 4.0),
        params={
            "light_path": "/World/skyLight",
            "hdri_config_path": str(Path(__file__).parent / "resources" / "hdri_paths_ood.yaml"),
            "intensity_range": (1000.0, 4000.0),
            "rotation_range": (0.0, 360.0),
        },
    )

    # Evaluation-style reset: only the ObjectAnywhereEEAnywhere distribution.
    reset_from_reset_states = EventTerm(
        func=task_mdp.MultiResetManager,
        mode="reset",
        params={
            "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset",
            "reset_types": ["ObjectAnywhereEEAnywhere"],
            "probs": [1.0],
            "success": "env.reward_manager.get_term_cfg('progress_context').func.success",
        },
    )


@configclass
class DataCollectionOODRGBEventCfg(OODRGBEventCfg):
    """Data collection OOD events: override reset to sample from all 4 distributions."""

    reset_from_reset_states = EventTerm(
        func=task_mdp.MultiResetManager,
        mode="reset",
        params={
            "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset",
            "reset_types": [
                "ObjectAnywhereEEAnywhere",
                "ObjectRestingEEGrasped",
                "ObjectAnywhereEEGrasped",
                "ObjectPartiallyAssembledEEGrasped",
            ],
            "probs": [0.25, 0.25, 0.25, 0.25],
            "success": "env.reward_manager.get_term_cfg('progress_context').func.success",
        },
    )


@configclass
class Ur5eRobotiq2f85DataCollectionRGBRelCartesianOSCOODCfg(Ur5eRobotiq2f85DataCollectionRGBRelCartesianOSCCfg):
    """Data collection config with OOD (out-of-distribution) textures and HDRIs."""

    events: DataCollectionOODRGBEventCfg = DataCollectionOODRGBEventCfg()


@configclass
class Ur5eRobotiq2f85EvalRGBRelCartesianOSCOODCfg(Ur5eRobotiq2f85EvalRGBRelCartesianOSCCfg):
    """Evaluation config with OOD (out-of-distribution) textures and HDRIs."""

    events: OODRGBEventCfg = OODRGBEventCfg()
diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/grasp_sampling_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/grasp_sampling_cfg.py
similarity index 100%
rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/grasp_sampling_cfg.py
rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/grasp_sampling_cfg.py
diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py
similarity index 99%
rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py
rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py
index b0486091..02c0f054 100644
--- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py
+++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/partial_assemblies_cfg.py
@@ -173,7 +173,7 @@ class PartialAssembliesRewardsCfg:
 "collision_analyzer_cfg": task_mdp.CollisionAnalyzerCfg(
 num_points=1024,
max_dist=0.5, - min_dist=-0.0005, + min_dist=-0.001, asset_cfg=SceneEntityCfg("insertive_object"), obstacle_cfgs=[SceneEntityCfg("receptive_object")], ) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/reset_states_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/reset_states_cfg.py similarity index 93% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/reset_states_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/reset_states_cfg.py index 79e9a42d..e75d6b62 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/reset_states_cfg.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/reset_states_cfg.py @@ -19,9 +19,9 @@ from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR -from uwlab_assets.robots.ur5e_robotiq_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85 +from uwlab_assets.robots.ur5e_robotiq_gripper import IMPLICIT_UR5E_ROBOTIQ_2F85 -from uwlab_tasks.manager_based.manipulation.reset_states.config.ur5e_robotiq_2f85.actions import ( +from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.actions import ( Ur5eRobotiq2f85RelativeOSCAction, ) @@ -32,7 +32,7 @@ class ResetStatesSceneCfg(InteractiveSceneCfg): """Scene configuration for reset states environment.""" - robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") + robot = IMPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") insertive_object: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/InsertiveObject", @@ -175,7 +175,7 @@ class ResetStatesBaseEventCfg: "pose_range": { "x": (0.3, 0.55), "y": (-0.1, 0.3), - "z": (0.0, 0.001), + "z": (0.0, 0.0), "roll": (0.0, 0.0), 
"pitch": (0.0, 0.0), "yaw": (-np.pi / 12, np.pi / 12), @@ -236,7 +236,8 @@ class ObjectRestingEEGraspedEventCfg(ResetStatesBaseEventCfg): func=task_mdp.MultiResetManager, mode="reset", params={ - "base_paths": [f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectAnywhereEEAnywhere"], + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", + "reset_types": ["ObjectAnywhereEEAnywhere"], "probs": [1.0], }, ) @@ -245,7 +246,7 @@ class ObjectRestingEEGraspedEventCfg(ResetStatesBaseEventCfg): func=task_mdp.reset_end_effector_from_grasp_dataset, mode="reset", params={ - "base_path": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/GraspSampling/ObjectPairs", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", "fixed_asset_cfg": SceneEntityCfg("insertive_object"), "robot_ik_cfg": SceneEntityCfg( "robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"], body_names="robotiq_base_link" @@ -288,7 +289,7 @@ class ObjectAnywhereEEGraspedEventCfg(ResetStatesBaseEventCfg): func=task_mdp.reset_end_effector_from_grasp_dataset, mode="reset", params={ - "base_path": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/GraspSampling/ObjectPairs", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", "fixed_asset_cfg": SceneEntityCfg("insertive_object"), "robot_ik_cfg": SceneEntityCfg( "robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"], body_names="robotiq_base_link" @@ -312,7 +313,7 @@ class ObjectPartiallyAssembledEEAnywhereEventCfg(ResetStatesBaseEventCfg): func=task_mdp.reset_insertive_object_from_partial_assembly_dataset, mode="reset", params={ - "base_path": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/PartialAssemblies/ObjectPairs", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", "insertive_object_cfg": SceneEntityCfg("insertive_object"), "receptive_object_cfg": SceneEntityCfg("receptive_object"), "pose_range_b": { @@ -353,7 +354,7 @@ class ObjectPartiallyAssembledEEGraspedEventCfg(ResetStatesBaseEventCfg): 
func=task_mdp.reset_insertive_object_from_partial_assembly_dataset, mode="reset", params={ - "base_path": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/PartialAssemblies/ObjectPairs", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", "insertive_object_cfg": SceneEntityCfg("insertive_object"), "receptive_object_cfg": SceneEntityCfg("receptive_object"), "pose_range_b": { @@ -371,7 +372,7 @@ class ObjectPartiallyAssembledEEGraspedEventCfg(ResetStatesBaseEventCfg): func=task_mdp.reset_end_effector_from_grasp_dataset, mode="reset", params={ - "base_path": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/GraspSampling/ObjectPairs", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", "fixed_asset_cfg": SceneEntityCfg("insertive_object"), "robot_ik_cfg": SceneEntityCfg( "robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"], body_names="robotiq_base_link" @@ -421,12 +422,12 @@ class ResetStatesTerminationCfg: task_mdp.CollisionAnalyzerCfg( num_points=1024, max_dist=0.5, - min_dist=-0.0005, + min_dist=-0.001, asset_cfg=SceneEntityCfg("insertive_object"), obstacle_cfgs=[SceneEntityCfg("receptive_object")], ), ], - "max_robot_pos_deviation": 0.05, + "max_robot_pos_deviation": 0.1, "max_object_pos_deviation": MISSING, "pos_z_threshold": -0.02, "consecutive_stability_steps": 5, @@ -585,6 +586,10 @@ class ObjectPartiallyAssembledEEAnywhereResetStatesCfg(UR5eRobotiq2f85ResetState def __post_init__(self): super().__post_init__() self.terminations.success.params["max_object_pos_deviation"] = 0.025 + self.terminations.success.params["insertive_asset_cfg"] = SceneEntityCfg("insertive_object") + self.terminations.success.params["receptive_asset_cfg"] = SceneEntityCfg("receptive_object") + self.terminations.success.params["assembly_success_prob"] = 0.5 + self.terminations.success.params["assembly_threshold_scale"] = 1.5 @configclass @@ -594,3 +599,7 @@ class ObjectPartiallyAssembledEEGraspedResetStatesCfg(UR5eRobotiq2f85ResetStates def __post_init__(self): 
super().__post_init__() self.terminations.success.params["max_object_pos_deviation"] = 0.025 + self.terminations.success.params["insertive_asset_cfg"] = SceneEntityCfg("insertive_object") + self.terminations.success.params["receptive_asset_cfg"] = SceneEntityCfg("receptive_object") + self.terminations.success.params["assembly_success_prob"] = 0.5 + self.terminations.success.params["assembly_threshold_scale"] = 1.5 diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths.yaml b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths.yaml new file mode 100644 index 00000000..9f360871 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths.yaml @@ -0,0 +1,926 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +cloud: +- Assets/NVIDIA/HDRIs/Clear/evening_road_01_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/kloppenheim_02_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/mealie_road_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/noon_grass_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/qwantani_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/signal_hill_sunrise_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/sunflowers_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/syferfontein_18d_clear_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/venice_sunset_4k.hdr +- Assets/NVIDIA/HDRIs/Clear/white_cliff_top_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/abandoned_parking_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/champagne_castle_1_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/evening_road_01_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/kloofendal_48d_partly_cloudy_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/lakeside_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/sunflowers_4k.hdr +- Assets/NVIDIA/HDRIs/Cloudy/table_mountain_1_4k.hdr +- Assets/NVIDIA/HDRIs/Evening/evening_road_01_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/ZetoCG_com_WarehouseInterior2b.hdr +- Assets/NVIDIA/HDRIs/Indoor/adams_place_bridge_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/autoshop_01_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/bathroom_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/carpentry_shop_01_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/en_suite_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/entrance_hall_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/hospital_room_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/hotel_room_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/lebombo_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/old_bus_depot_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/small_empty_house_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/studio_small_04_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/surgery_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/vulture_hide_4k.hdr +- Assets/NVIDIA/HDRIs/Indoor/wooden_lounge_4k.hdr +- Assets/NVIDIA/HDRIs/Night/kloppenheim_02_4k.hdr +- Assets/NVIDIA/HDRIs/Night/moonlit_golf_4k.hdr +- Assets/NVIDIA/HDRIs/Storm/approaching_storm_4k.hdr +- 
Assets/NVIDIA/HDRIs/Studio/photo_studio_01_4k.hdr +- Assets/NVIDIA/HDRIs/Studio/studio_small_05_4k.hdr +- Assets/NVIDIA/HDRIs/Studio/studio_small_07_4k.hdr +- Assets/PolyHaven/HDRIs/clear/abandoned_hopper_terminal_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/abandoned_hopper_terminal_04_1k.hdr +- Assets/PolyHaven/HDRIs/clear/abandoned_pathway_1k.hdr +- Assets/PolyHaven/HDRIs/clear/autumn_field_1k.hdr +- Assets/PolyHaven/HDRIs/clear/autumn_field_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/autumn_forest_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/autumn_meadow_1k.hdr +- Assets/PolyHaven/HDRIs/clear/autumn_park_1k.hdr +- Assets/PolyHaven/HDRIs/clear/aviation_museum_hill_1k.hdr +- Assets/PolyHaven/HDRIs/clear/bamboo_tunnel_1k.hdr +- Assets/PolyHaven/HDRIs/clear/bergen_1k.hdr +- Assets/PolyHaven/HDRIs/clear/blaubeuren_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/clear/bloem_olive_house_1k.hdr +- Assets/PolyHaven/HDRIs/clear/bloem_train_track_clear_1k.hdr +- Assets/PolyHaven/HDRIs/clear/blue_grotto_1k.hdr +- Assets/PolyHaven/HDRIs/clear/brick_factory_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/brick_factory_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/chapmans_drive_1k.hdr +- Assets/PolyHaven/HDRIs/clear/chinese_garden_1k.hdr +- Assets/PolyHaven/HDRIs/clear/circus_maximus_2_1k.hdr +- Assets/PolyHaven/HDRIs/clear/cliffside_1k.hdr +- Assets/PolyHaven/HDRIs/clear/colosseum_1k.hdr +- Assets/PolyHaven/HDRIs/clear/countrytrax_midday_1k.hdr +- Assets/PolyHaven/HDRIs/clear/courtyard_1k.hdr +- Assets/PolyHaven/HDRIs/clear/crosswalk_1k.hdr +- Assets/PolyHaven/HDRIs/clear/crystal_falls_1k.hdr +- Assets/PolyHaven/HDRIs/clear/derelict_highway_midday_1k.hdr +- Assets/PolyHaven/HDRIs/clear/derelict_highway_noon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/derelict_underpass_1k.hdr +- Assets/PolyHaven/HDRIs/clear/docklands_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/drakensberg_solitary_mountain_1k.hdr +- Assets/PolyHaven/HDRIs/clear/drakensberg_solitary_mountain_puresky_1k.hdr +- 
Assets/PolyHaven/HDRIs/clear/dreifaltigkeitsberg_1k.hdr +- Assets/PolyHaven/HDRIs/clear/felsenlabyrinth_1k.hdr +- Assets/PolyHaven/HDRIs/clear/flower_road_1k.hdr +- Assets/PolyHaven/HDRIs/clear/forest_cave_1k.hdr +- Assets/PolyHaven/HDRIs/clear/forest_slope_1k.hdr +- Assets/PolyHaven/HDRIs/clear/frozen_lake_1k.hdr +- Assets/PolyHaven/HDRIs/clear/geislingen_an_der_steige_1k.hdr +- Assets/PolyHaven/HDRIs/clear/goegap_1k.hdr +- Assets/PolyHaven/HDRIs/clear/gothic_manor_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/green_sanctuary_1k.hdr +- Assets/PolyHaven/HDRIs/clear/greenwich_park_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/gum_trees_1k.hdr +- Assets/PolyHaven/HDRIs/clear/harties_cliff_view_1k.hdr +- Assets/PolyHaven/HDRIs/clear/harvest_1k.hdr +- Assets/PolyHaven/HDRIs/clear/hausdorf_clear_sky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/hilltop_construction_1k.hdr +- Assets/PolyHaven/HDRIs/clear/hochsal_field_1k.hdr +- Assets/PolyHaven/HDRIs/clear/horn-koppe_snow_1k.hdr +- Assets/PolyHaven/HDRIs/clear/illovo_beach_balcony_1k.hdr +- Assets/PolyHaven/HDRIs/clear/je_gray_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/kiara_4_mid-morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/kiara_5_noon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/kiara_6_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/kloofendal_43d_clear_puresky_4k.hdr +- Assets/PolyHaven/HDRIs/clear/lakeside_1k.hdr +- Assets/PolyHaven/HDRIs/clear/lauter_waterfall_1k.hdr +- Assets/PolyHaven/HDRIs/clear/monbachtal_riverbank_1k.hdr +- Assets/PolyHaven/HDRIs/clear/monks_forest_1k.hdr +- Assets/PolyHaven/HDRIs/clear/mossy_forest_1k.hdr +- Assets/PolyHaven/HDRIs/clear/moulton_falls_train_tunnel_east_1k.hdr +- Assets/PolyHaven/HDRIs/clear/moulton_station_train_tunnel_west_1k.hdr +- Assets/PolyHaven/HDRIs/clear/nagoya_wall_path_1k.hdr +- Assets/PolyHaven/HDRIs/clear/nkuhlu_1k.hdr +- Assets/PolyHaven/HDRIs/clear/noon_grass_1k.hdr +- Assets/PolyHaven/HDRIs/clear/old_tree_in_city_park_1k.hdr +- Assets/PolyHaven/HDRIs/clear/orlando_stadium_1k.hdr 
+- Assets/PolyHaven/HDRIs/clear/ox_bridge_morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/palermo_park_1k.hdr +- Assets/PolyHaven/HDRIs/clear/palermo_sidewalk_1k.hdr +- Assets/PolyHaven/HDRIs/clear/park_parking_1k.hdr +- Assets/PolyHaven/HDRIs/clear/partial_eclipse_1k.hdr +- Assets/PolyHaven/HDRIs/clear/passendorf_snow_1k.hdr +- Assets/PolyHaven/HDRIs/clear/phalzer_forest_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/piazza_martin_lutero_1k.hdr +- Assets/PolyHaven/HDRIs/clear/pizzo_pernice_1k.hdr +- Assets/PolyHaven/HDRIs/clear/pizzo_pernice_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/pond_1k.hdr +- Assets/PolyHaven/HDRIs/clear/pump_station_1k.hdr +- Assets/PolyHaven/HDRIs/clear/pylons_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_afternoon_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_late_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_late_afternoon_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_mid_morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_mid_morning_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_morning_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_noon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_noon_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_patio_1k.hdr +- Assets/PolyHaven/HDRIs/clear/qwantani_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/radkow_lake_1k.hdr +- Assets/PolyHaven/HDRIs/clear/rainforest_trail_1k.hdr +- Assets/PolyHaven/HDRIs/clear/red_wall_1k.hdr +- Assets/PolyHaven/HDRIs/clear/resting_place_2_1k.hdr +- Assets/PolyHaven/HDRIs/clear/river_alcove_1k.hdr +- Assets/PolyHaven/HDRIs/clear/roofless_ruins_1k.hdr +- Assets/PolyHaven/HDRIs/clear/rosendal_mountain_midmorning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/rural_crossroads_1k.hdr +- Assets/PolyHaven/HDRIs/clear/san_giuseppe_bridge_1k.hdr +- 
Assets/PolyHaven/HDRIs/clear/shady_patch_1k.hdr +- Assets/PolyHaven/HDRIs/clear/simons_town_road_1k.hdr +- Assets/PolyHaven/HDRIs/clear/skukuza_golf_1k.hdr +- Assets/PolyHaven/HDRIs/clear/small_harbour_morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/snowy_cemetery_1k.hdr +- Assets/PolyHaven/HDRIs/clear/snowy_forest_path_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/snowy_forest_path_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/spiaggia_di_mondello_1k.hdr +- Assets/PolyHaven/HDRIs/clear/stone_alley_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/stone_alley_03_1k.hdr +- Assets/PolyHaven/HDRIs/clear/stream_1k.hdr +- Assets/PolyHaven/HDRIs/clear/studio_garden_1k.hdr +- Assets/PolyHaven/HDRIs/clear/summer_stage_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/syferfontein_18d_clear_1k.hdr +- Assets/PolyHaven/HDRIs/clear/syferfontein_18d_clear_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/teutonic_castle_moat_1k.hdr +- Assets/PolyHaven/HDRIs/clear/tiber_1_1k.hdr +- Assets/PolyHaven/HDRIs/clear/tief_etz_1k.hdr +- Assets/PolyHaven/HDRIs/clear/ulmer_muenster_1k.hdr +- Assets/PolyHaven/HDRIs/clear/urban_alley_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/veld_fire_1k.hdr +- Assets/PolyHaven/HDRIs/clear/venetian_crossroads_1k.hdr +- Assets/PolyHaven/HDRIs/clear/versveldpas_1k.hdr +- Assets/PolyHaven/HDRIs/clear/whipple_creek_gazebo_1k.hdr +- Assets/PolyHaven/HDRIs/clear/wide_street_01_1k.hdr +- Assets/PolyHaven/HDRIs/clear/wide_street_02_1k.hdr +- Assets/PolyHaven/HDRIs/clear/winter_sky_1k.hdr +- Assets/PolyHaven/HDRIs/clear/wobbly_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/clear/wooden_motel_1k.hdr +- Assets/PolyHaven/HDRIs/clear/woods_1k.hdr +- Assets/PolyHaven/HDRIs/clear/xiequ_yuan_1k.hdr +- Assets/PolyHaven/HDRIs/clear/zhengyang_gate_1k.hdr +- Assets/PolyHaven/HDRIs/clear/zwartkops_curve_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/zwartkops_curve_morning_1k.hdr +- Assets/PolyHaven/HDRIs/clear/zwartkops_start_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/clear/zwartkops_straight_afternoon_1k.hdr 
+- Assets/PolyHaven/HDRIs/clear/zwartkops_straight_morning_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_church_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_hopper_terminal_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_hopper_terminal_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_parking_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_slipway_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_tank_farm_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_tank_farm_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_tank_farm_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_tank_farm_04_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/abandoned_waterworks_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/afrikaans_church_exterior_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ahornsteig_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/air_museum_playground_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/aloe_farm_shade_house_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/alps_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/amphitheatre_zanzibar_fort_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/approaching_storm_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/aristea_wreck_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/aristea_wreck_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_crossing_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_forest_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_forest_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_ground_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_hill_view_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_hockey_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/autumn_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/balcony_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ballawley_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/barnaslingan_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/barnaslingan_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/beach_cloudy_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/belfast_farmhouse_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/belfast_open_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/belvedere_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/bethnal_green_entrance_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/binnenalster_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/blau_river_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/blaubeuren_outskirts_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/bloem_hill_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/bloem_hill_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/bloem_hill_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/bloem_train_track_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/blue_lagoon_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/borghese_gardens_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/buikslotermeerplein_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cambridge_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/canary_wharf_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cannon_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/castel_st_angelo_roof_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cayley_lookout_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cedar_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cedar_bridge_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/charolettenbrunn_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/circus_maximus_1_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/clarens_midday_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cloud_layers_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cloudy_cliffside_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cloudy_netted_nursery_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/cloudy_vondelpark_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/construction_yard_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dalkey_view_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dam_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dam_wall_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/delta_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/derelict_airfield_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/derelict_airfield_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/derelict_overpass_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/dirt_bike_track_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/docklands_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dresden_moat_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dresden_square_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/driving_school_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dry_cracked_lake_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dry_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dry_hay_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dry_meadow_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/dry_orchard_meadow_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ehingen_hillside_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/eilenriede_labyrinth_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/emmarentia_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/epping_forest_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/epping_forest_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/evening_meadow_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/evening_museum_courtyard_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/factory_yard_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/farm_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/farm_field_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/farmland_overcast_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/flower_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/fort_schanskop_morning_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/fouriesburg_mountain_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/fouriesburg_mountain_midday_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/furry_clouds_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/furstenstein_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/future_parking_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/gamrig_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/garden_nook_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/german_town_street_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/glencairn_expressway_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/golden_gate_hills_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/graveyard_pathways_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/gray_pier_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/green_point_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/greenwich_park_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/greenwich_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/hanger_exterior_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/harties_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/hay_bales_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/herkulessaulen_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/hochsal_forest_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/horn-koppe_spring_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/hotel_rooftop_balcony_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/immenstadter_horn_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/irish_institute_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/je_gray_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kiara_3_morning_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/killesberg_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloetzle_blei_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_28d_misty_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_28d_misty_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_38d_partly_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_38d_partly_cloudy_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_43d_clear_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_43d_clear_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_48d_partly_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_48d_partly_cloudy_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_misty_morning_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_misty_morning_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_overcast_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloofendal_overcast_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloppenheim_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloppenheim_03_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloppenheim_05_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/kloppenheim_05_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/konzerthaus_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/lago_disola_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/lenong_1_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/lenong_3_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/limehouse_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/limpopo_golf_course_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/lush_dirt_path_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/lythwood_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/lythwood_terrace_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/mall_parking_lot_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/meadow_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/meadow_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/medieval_cafe_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/missile_launch_facility_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/missile_launch_facility_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/misty_farm_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/misty_pines_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/modern_buildings_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/modern_evening_street_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/monte_scherbelino_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/montorfano_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/mud_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/mud_road_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/muddy_autumn_forest_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/museum_of_history_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/museumplein_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/near_the_river_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/niederwihl_forest_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/oberer_kuhberg_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/old_outdoor_theater_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/old_quarry_gerlingen_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ostrich_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ouchy_pier_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/outdoor_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/overcast_industrial_courtyard_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/overcast_soil_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/overcast_soil_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/overcast_soil_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/parched_canal_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/park_music_stage_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/petit_port_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/piazza_bologni_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/pine_picnic_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/plac_wolnosci_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/pool_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/poolbeg_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/portland_landing_pad_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/potsdamer_platz_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/pretoria_gardens_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/pretville_street_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/promenade_de_vidy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/quadrangle_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/quadrangle_sunny_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/quarry_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/quarry_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/quarry_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/red_hill_cloudy_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/red_hill_curve_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/red_hill_straight_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/reichstag_1_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/residential_garden_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/resting_place_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rhodes_memorial_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/river_rocks_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rogland_overcast_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/roof_garden_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rooftop_day_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rosendal_plains_1_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rosendal_plains_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/round_platform_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/ruckenkreuz_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rural_asphalt_road_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/rural_graffiti_tower_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rustig_koppie_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/rustig_koppie_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/schachen_forest_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/schadowplatz_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/secluded_beach_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/shudu_lake_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/simons_town_harbour_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/simons_town_rocks_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/skate_park_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/skidpan_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/small_rural_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/smelting_tower_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/smelting_tower_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snow_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snow_field_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snow_field_2_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snow_field_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snowy_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snowy_forest_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snowy_hillside_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snowy_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/snowy_park_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/soliltude_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/stadium_exterior_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/stone_alley_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/stone_pines_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/stuttgart_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/suburban_field_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/suburban_field_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/suburban_football_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/sunflowers_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/sunflowers_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/sunny_country_road_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/sunny_rose_garden_1k.hdr +- 
Assets/PolyHaven/HDRIs/cloudy/symmetrical_garden_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/symmetrical_garden_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/teufelsberg_ground_1_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/teufelsberg_ground_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/teufelsberg_roof_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/tiber_2_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/tiber_island_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/tiergarten_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/tucker_wreck_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/turning_area_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_courtyard_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_courtyard_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_street_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_street_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_street_03_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/urban_street_04_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/victoria_curve_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/victoria_curve_02_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/vignaioli_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/waterbuck_trail_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/whipple_creek_regional_park_01_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/whipple_creek_regional_park_04_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/white_cliff_top_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/winter_river_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/xanderklinge_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/yellow_field_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/zavelstein_1k.hdr +- Assets/PolyHaven/HDRIs/cloudy/zawiszy_czarnego_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_bakery_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_construction_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_factory_canteen_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_factory_canteen_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_games_room_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_games_room_02_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/abandoned_garage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_greenhouse_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_hall_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_tiled_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_workshop_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/abandoned_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/acoustical_shell_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/adams_place_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/aerodynamics_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/afrikaans_church_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/aft_lounge_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/aircraft_workshop_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/anniversary_lounge_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/art_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/artist_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/auto_service_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/autoshop_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/ballroom_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/bank_vault_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/basement_boxing_ring_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/bathroom_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/billiard_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/birbeck_street_underpass_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/blender_institute_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/blinds_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/blocky_photo_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/blue_photo_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/boiler_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/boma_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_03_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_04_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_05_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/brown_photostudio_06_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/brown_photostudio_07_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/burnt_warehouse_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/bush_restaurant_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cabin_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/carpentry_shop_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/carpentry_shop_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/castle_zavelstein_cellar_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cave_wall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cayley_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/chapel_day_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/childrens_hospital_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_03_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_04_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_05_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_06_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/christmas_photo_studio_07_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cinema_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cinema_lobby_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/circus_arena_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/climbing_gym_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/colorful_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/combination_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/comfy_cafe_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/concrete_tunnel_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/concrete_tunnel_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/country_club_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/creepy_bathroom_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/cyclorama_hard_light_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/dancing_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/de_balie_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/decor_shop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/distribution_board_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/drachenfels_cellar_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/dresden_station_night_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/empty_play_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/empty_warehouse_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/empty_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/en_suite_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/entrance_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/events_hall_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/fireplace_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/floral_tent_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/garage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/gear_store_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/georgentor_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/glass_passage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/graffiti_shelter_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/gym_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/gym_entrance_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hall_of_finfish_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hall_of_mammals_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hamburg_hbf_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hangar_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hayloft_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hikers_cave_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hospital_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hospital_room_2_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/hotel_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/indoor_pool_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/industrial_pipe_and_valve_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/industrial_pipe_and_valve_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/industrial_wooden_attic_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/industrial_workshop_foundry_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/interior_construction_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/kart_club_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/kiara_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/lapa_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/large_corridor_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/leadenhall_market_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/lebombo_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/lookout_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/lythwood_lounge_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/lythwood_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/machine_shop_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/machine_shop_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/machine_shop_03_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/marry_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/metro_noord_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/metro_vijzelgracht_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/mirrored_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/missile_launch_facility_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/modern_bathroom_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/monkstown_castle_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/moon_lab_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/mosaic_tunnel_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/museum_of_ethnography_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/music_hall_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/music_hall_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/mutianyu_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/neon_photostudio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/old_apartments_walkway_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/old_bus_depot_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/old_depot_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/old_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/old_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/outdoor_chapel_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/parking_garage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/paul_lobe_haus_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/peppermint_powerplant_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/peppermint_powerplant_2_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/phone_shop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/photo_studio_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/photo_studio_broadway_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/photo_studio_loft_hall_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/photo_studio_london_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/pillars_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/pine_attic_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/poly_haven_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/pretville_cinema_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/provence_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/pump_house_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/reading_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/reinforced_concrete_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/rostock_arches_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/rostock_laage_airport_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/royal_esplanade_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/school_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/school_quad_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/sculpture_exhibition_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/sepulchral_chapel_basement_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/sepulchral_chapel_rotunda_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/short_tunnel_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/skylit_garage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_cathedral_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_cathedral_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_cave_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_empty_house_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_empty_room_1_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_empty_room_2_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_empty_room_3_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_empty_room_4_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_hangar_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_hangar_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/small_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/smelting_tower_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/solitude_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/squash_court_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/st_fagans_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/storeroom_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/studio_country_hall_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_03_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_04_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_05_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_06_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_07_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_08_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/studio_small_09_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/subway_entrance_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/surgery_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/teufelsberg_inner_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/teufelsberg_lookout_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/thatch_chapel_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/theater_01_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/theater_02_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/trekker_monument_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/tv_studio_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/unfinished_office_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/unfinished_office_night_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/university_workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/veranda_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/vestibule_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/vintage_measuring_lab_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/voortrekker_interior_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/vulture_hide_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/warm_bar_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/warm_reception_dinner_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/warm_restaurant_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/warm_restaurant_night_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/whale_skeleton_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/wooden_lounge_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/workshop_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/wrestling_gym_1k.hdr +- 
Assets/PolyHaven/HDRIs/indoor/yaris_interior_garage_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/yoga_room_1k.hdr +- Assets/PolyHaven/HDRIs/indoor/zwartkops_pit_1k.hdr +- Assets/PolyHaven/HDRIs/night/blaubeuren_church_square_1k.hdr +- Assets/PolyHaven/HDRIs/night/blaubeuren_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/blue_lagoon_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/clarens_night_01_1k.hdr +- Assets/PolyHaven/HDRIs/night/clarens_night_02_1k.hdr +- Assets/PolyHaven/HDRIs/night/cobblestone_street_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/courtyard_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/dikhololo_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/golden_bay_1k.hdr +- Assets/PolyHaven/HDRIs/night/hansaplatz_1k.hdr +- Assets/PolyHaven/HDRIs/night/kloppenheim_02_1k.hdr +- Assets/PolyHaven/HDRIs/night/kloppenheim_02_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/night/kloppenheim_04_1k.hdr +- Assets/PolyHaven/HDRIs/night/kloppenheim_07_1k.hdr +- Assets/PolyHaven/HDRIs/night/kloppenheim_07_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/night/lakeside_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/laufenurg_church_1k.hdr +- Assets/PolyHaven/HDRIs/night/modern_buildings_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/moonless_golf_1k.hdr +- Assets/PolyHaven/HDRIs/night/moonlit_golf_1k.hdr +- Assets/PolyHaven/HDRIs/night/narrow_moonlit_road_1k.hdr +- Assets/PolyHaven/HDRIs/night/neuer_zollhof_1k.hdr +- Assets/PolyHaven/HDRIs/night/night_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/night/pond_bridge_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/preller_drive_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_moon_noon_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_moon_noon_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_moonrise_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_moonrise_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/qwantani_night_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/night/rathaus_1k.hdr +- 
Assets/PolyHaven/HDRIs/night/rogland_clear_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/rogland_moonlit_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/rooftop_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/sandsloot_1k.hdr +- Assets/PolyHaven/HDRIs/night/satara_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/satara_night_no_lamps_1k.hdr +- Assets/PolyHaven/HDRIs/night/shanghai_bund_1k.hdr +- Assets/PolyHaven/HDRIs/night/solitude_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/st_peters_square_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/street_lamp_1k.hdr +- Assets/PolyHaven/HDRIs/night/viale_giuseppe_garibaldi_1k.hdr +- Assets/PolyHaven/HDRIs/night/vignaioli_night_1k.hdr +- Assets/PolyHaven/HDRIs/night/winter_evening_1k.hdr +- Assets/PolyHaven/HDRIs/night/zwinger_night_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/abandoned_tank_farm_05_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/altanka_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/arboretum_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/autumn_forest_04_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/aviation_museum_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bambanani_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/beach_parking_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/belfast_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/belfast_sunset_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bell_park_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bell_park_pier_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/between_bridges_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/birchwood_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bismarckturm_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bismarckturm_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bloem_field_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/blouberg_sunrise_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/blouberg_sunrise_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/bryanston_park_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/cape_hill_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/cedar_bridge_sunset_1_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/cedar_bridge_sunset_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/champagne_castle_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/citrus_orchard_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/citrus_orchard_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/citrus_orchard_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/citrus_orchard_road_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/dam_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/dark_autumn_forest_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/dikhololo_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/drackenstein_quarry_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/drackenstein_quarry_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/dusseldorf_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/ehingen_hillside_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/eilenriede_park_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/etzwihl_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/evening_field_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/evening_road_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/evening_road_01_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/farm_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/fish_eagle_hill_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/fish_hoek_beach_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/flamingo_pan_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/forest_grove_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/forgotten_miniland_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/fouriesburg_mountain_lookout_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/fouriesburg_mountain_lookout_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/freight_station_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/goegap_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/golf_course_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/gothic_manor_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/hamburg_canal_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/hilly_terrain_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/hilly_terrain_01_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/industrial_sunset_02_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/industrial_sunset_02_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/industrial_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/industrial_sunset_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kiara_1_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kiara_2_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kiara_7_late-afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kiara_8_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kiara_9_dusk_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/klippad_dawn_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/klippad_dawn_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/klippad_sunrise_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/klippad_sunrise_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kloppenheim_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kloppenheim_01_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kloppenheim_06_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/kloppenheim_06_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/konigsallee_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lake_pier_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lakes_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lakeside_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lakeside_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/learner_park_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/leibstadt_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lenong_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lilienstein_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/little_paris_eiffel_tower_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/little_paris_under_tower_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lonely_road_afternoon_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lonely_road_afternoon_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lot_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/lot_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/magalies_field_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/mealie_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/minedump_flats_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/misty_dawn_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/modern_buildings_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/mpumalanga_veld_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/mpumalanga_veld_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/nature_reserve_forest_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/near_the_river_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/netball_court_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/neurathen_rock_castle_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/ninomaru_teien_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/noga_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/northcliff_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/orbita_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/outdoor_umbrellas_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/palermo_square_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/park_bench_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/pathway_morning_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/pedestrian_overpass_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/piazza_san_marco_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/pink_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/pisztyk_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/plains_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/quarry_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/quarry_01_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/quarry_04_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/quarry_04_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/quattro_canti_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dawn_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dusk_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dusk_1_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dusk_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_dusk_2_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_sunrise_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/qwantani_sunset_puresky_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/railway_bridge_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/railway_bridges_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/reinforced_concrete_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/river_walk_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/river_walk_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rocky_ridge_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rocky_ridge_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rogland_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rolling_hills_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rooitou_park_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rosendal_park_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rosendal_park_sunset_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rotes_rathaus_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rural_evening_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rural_landscape_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/rural_winter_roadside_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sabie_tent_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/safari_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/scythian_tombs_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/scythian_tombs_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/scythian_tombs_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/shanghai_riverside_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/signal_hill_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/signal_hill_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sisulu_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/small_harbor_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/small_harbor_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/small_harbour_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/small_rural_road_02_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/spaichingen_hill_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/spooky_bamboo_morning_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/spree_bank_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/spruit_dawn_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/spruit_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/stadium_01_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/steinbach_field_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sterkspruit_falls_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/stierberg_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/straw_rolls_field_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/stuttgart_suburbs_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/suburban_parking_area_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/summer_stage_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sunny_vondelpark_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sunset_fairway_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sunset_forest_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sunset_in_the_chalk_quarry_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/sunset_jhbcentral_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_0d_clear_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_0d_clear_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_1d_clear_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_1d_clear_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_6d_clear_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/syferfontein_6d_clear_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/table_mountain_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/table_mountain_1_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/table_mountain_2_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/table_mountain_2_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/tears_of_steel_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/teatro_massimo_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/the_lost_city_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/the_sky_is_on_fire_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/treetop_balcony_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/twilight_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/umhlanga_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/under_bridge_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/vatican_road_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/venice_dawn_1_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/venice_dawn_2_1k.hdr +- 
Assets/PolyHaven/HDRIs/sunset/venice_sunrise_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/venice_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/victoria_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/wasteland_clouds_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/wasteland_clouds_puresky_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/wildflower_field_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/winter_lake_01_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/winter_orchard_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/zwartkops_curve_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/zwartkops_start_morning_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/zwartkops_start_sunset_1k.hdr +- Assets/PolyHaven/HDRIs/sunset/zwartkops_straight_sunset_1k.hdr diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths_ood.yaml b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths_ood.yaml new file mode 100644 index 00000000..c73008e7 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/hdri_paths_ood.yaml @@ -0,0 +1,113 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +# hdri_paths_ood.yaml +# Out-of-distribution HDRIs from AmbientCG (EXR format) +# These are completely different from Poly Haven HDRIs used in training + +# AmbientCG HDRIs (cloud - Backblaze B2) +cloud: + # Day Environment HDRIs (20 textures) + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI001_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI005_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI010_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI015_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI020_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI025_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI030_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI035_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI040_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI045_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI050_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI055_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI060_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI065_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI070_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI075_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI080_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI085_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI090_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DayEnvironmentHDRI095_1K_HDR.exr + + # Day Sky HDRIs (7 textures) + - Assets/AmbientCG/HDRIs/DaySkyHDRI001A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI010A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI020A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI030A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI040A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI050A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/DaySkyHDRI060A_1K_HDR.exr + + # Evening Environment HDRIs (5 textures) + - 
Assets/AmbientCG/HDRIs/EveningEnvironmentHDRI001_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningEnvironmentHDRI002_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningEnvironmentHDRI003_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningEnvironmentHDRI004_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningEnvironmentHDRI005_1K_HDR.exr + + # Evening Sky HDRIs (9 textures) + - Assets/AmbientCG/HDRIs/EveningSkyHDRI001A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI005A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI010A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI015A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI020A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI025A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI030A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI035A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/EveningSkyHDRI040A_1K_HDR.exr + + # Indoor Environment HDRIs (22 textures) + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI001_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI002_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI003_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI004_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI005_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI006_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI007_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI008_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI009_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI010_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI011_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI012_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI013_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI014_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI015_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI016_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI017_1K_HDR.exr + - 
Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI018_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI019_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI020_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI021_1K_HDR.exr + - Assets/AmbientCG/HDRIs/IndoorEnvironmentHDRI022_1K_HDR.exr + + # Morning Sky HDRIs (6 textures) + - Assets/AmbientCG/HDRIs/MorningSkyHDRI001A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/MorningSkyHDRI003A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/MorningSkyHDRI005A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/MorningSkyHDRI007A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/MorningSkyHDRI009A_1K_HDR.exr + - Assets/AmbientCG/HDRIs/MorningSkyHDRI011A_1K_HDR.exr + + # Night Environment HDRIs (10 textures) + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI001_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI002_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI003_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI004_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI005_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI006_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI007_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI008_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI009_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightEnvironmentHDRI010_1K_HDR.exr + + # Night Sky HDRIs (8 textures) + - Assets/AmbientCG/HDRIs/NightSkyHDRI001_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI003_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI005_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI007_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI009_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI011_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI013_1K_HDR.exr + - Assets/AmbientCG/HDRIs/NightSkyHDRI015_1K_HDR.exr diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths.yaml 
b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths.yaml new file mode 100644 index 00000000..4d7bbf00 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths.yaml @@ -0,0 +1,963 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +cloud: +- Assets/NVIDIA/Textures/Base/Architecture/Ceiling_Tiles/Ceiling_Tiles_BaseColor.png +- Assets/NVIDIA/Textures/Base/Architecture/Roof_Tiles/Roof_Tiles_BaseColor.png +- Assets/NVIDIA/Textures/Base/Architecture/Shingles_01/Shingles_01_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Beige/Carpet_Beige_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Berber_Gray/Carpet_Berber_Gray_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Berber_Multi/Carpet_Berber_Multi_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Charcoal/Carpet_Charcoal_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Cream/Carpet_Cream_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Diamond_Olive/Carpet_Diamond_Olive_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Diamond_Yellow/Carpet_Diamond_Yellow_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Forest/Carpet_Forest_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Gray/Carpet_Gray_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Pattern_Leaf_Squares_Tan/Carpet_Pattern_Leaf_Squares_Tan_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Pattern_Loop/Carpet_Pattern_Loop_BaseColor.png +- Assets/NVIDIA/Textures/Base/Carpet/Carpet_Pattern_Squares_Multi/Carpet_Pattern_Squares_Multi_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Adobe_Brick/Adobe_Brick_BaseColor.png +- 
Assets/NVIDIA/Textures/Base/Masonry/Brick_Pavers/Brick_Pavers_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Brick_Wall_Brown/Brick_Wall_Brown_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Brick_Wall_Red/Brick_Wall_Red_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Concrete_Block/Concrete_Block_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Concrete_Formed/Concrete_Formed_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Concrete_Polished/Concrete_Polished_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Concrete_Rough/Concrete_Rough_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Concrete_Smooth/Concrete_Smooth_BaseColor.png +- Assets/NVIDIA/Textures/Base/Masonry/Stucco/Stucco_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Aluminum_Anodized/Aluminum_Anodized_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Aluminum_Cast/Aluminum_Cast_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Aluminum_Polished/Aluminum_Polished_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Brass/Brass_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Bronze/Bronze_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Brushed_Antique_Copper/Brushed_Antique_Copper_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Cast_Metal_Silver_Vein/Cast_Metal_Silver_Vein_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Chrome/Chrome_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Copper/Copper_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/CorrugatedMetal/CorrugatedMetal_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Gold/Gold_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Iron/Iron_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Metal_Door/Metal_Door_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Metal_Seamed_Roof/Metal_Seamed_Roof_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/RustedMetal/RustedMetal_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Silver/Silver_BaseColor.png +- 
Assets/NVIDIA/Textures/Base/Metals/Steel_Blued/Steel_Blued_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Steel_Carbon/Steel_Carbon_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Steel_Cast/Steel_Cast_BaseColor.png +- Assets/NVIDIA/Textures/Base/Metals/Steel_Stainless/Steel_Stainless_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Chain_Link_Fence/Chain_Link_Fence_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Gloss/Paint_Gloss_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Gloss_Finish/Paint_Gloss_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Matte/Paint_Matte_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Matte_Finish/Paint_Matte_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Satin/Paint_Satin_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Miscellaneous/Paint_Satin_Finish/Paint_Satin_Finish_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Asphalt/Asphalt_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Dirt/Dirt_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Grass_Countryside/Grass_Countryside_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Grass_Cut/Grass_Cut_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Grass_Winter/Grass_Winter_baseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Leaves/Leaves_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Mulch_Brown/Mulch_Brown_baseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Sand/Sand_BaseColor.png +- Assets/NVIDIA/Textures/Base/Natural/Soil_Rocky/Soil_Rocky_BaseColor.png +- Assets/NVIDIA/Textures/Base/Plastics/Rubber_Smooth/Rubber_Smooth_BaseColor.png +- Assets/NVIDIA/Textures/Base/Plastics/Rubber_Textured/Rubber_Textured_BaseColor.png +- Assets/NVIDIA/Textures/Base/Plastics/Veneer_OU_Walnut/Veneer_OU_Walnut_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Adobe_Octagon_Dots/Adobe_Octagon_Dots_BaseColor.png +- 
Assets/NVIDIA/Textures/Base/Stone/Ceramic_Smooth_Fired/Ceramic_Smooth_Fired_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Ceramic_Tile_12/Ceramic_Tile_12_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Ceramic_Tile_18/Ceramic_Tile_18_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Ceramic_Tile_6/Ceramic_Tile_6_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Fieldstone/Fieldstone_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Granite_Dark/Granite_Dark_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Granite_Light/Granite_Light_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Gravel/Gravel_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Gravel_River_Rock/Gravel_River_Rock_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Marble/Marble_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Marble_Smooth/Marble_Smooth_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Marble_Tile_12/Marble_Tile_12_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Marble_Tile_18/Marble_Tile_18_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Pea_Gravel/Pea_Gravel_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Porcelain_Smooth/Porcelain_Smooth_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Porcelain_Tile_4/Porcelain_Tile_4_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Porcelain_Tile_4_Linen/Porcelain_Tile_4_Linen_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Porcelain_Tile_6/Porcelain_Tile_6_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Porcelain_Tile_6_Linen/Porcelain_Tile_6_Linen_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Retaining_Block/Retaining_Block_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Slate/Slate_Tile_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Stone_Wall/Stone_Wall_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Terracotta/Terracotta_BaseColor.png +- Assets/NVIDIA/Textures/Base/Stone/Terrazzo/Terrazzo_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Cloth_Black/Cloth_Black_BaseColor.png +- 
Assets/NVIDIA/Textures/Base/Textiles/Cloth_Gray/Cloth_Gray_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Leather_Black/Leather_Black_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Leather_Brown/Leather_Brown_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Leather_Pumpkin/Leather_Pumpkin_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Linen_Beige/Linen_Beige_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Linen_Blue/Linen_Blue_BaseColor.png +- Assets/NVIDIA/Textures/Base/Textiles/Linen_White/Linen_White_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wall_Board/Cardboard/Cardboard_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wall_Board/Gypsum/Gypsum_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wall_Board/Paper/Paper_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wall_Board/Plaster/Plaster_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Ash/Ash_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Ash_Planks/Ash_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Bamboo/Bamboo_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Bamboo_Planks/Bamboo_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Birch/Birch_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Birch_Planks/Birch_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Cherry/Cherry_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Cherry_Planks/Cherry_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Cork/Cork_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Mahogany/Mahogany_baseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Mahogany_Planks/Mahogany_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Oak/Oak_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Oak_Planks/Oak_Planks_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Parquet_Floor/Parquet_Floor_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Plywood/Plywood_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Timber/Timber_BaseColor.png +- 
Assets/NVIDIA/Textures/Base/Wood/Timber_Cladding/Timber_Cladding_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Walnut/Walnut_BaseColor.png +- Assets/NVIDIA/Textures/Base/Wood/Walnut_Planks/Walnut_Planks_BaseColor.png +- Assets/NVIDIA/Textures/vMaterials_2/Carpet/textures/carpet_rough_woven_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Carpet/textures/long_floor_carpet_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ceramic/textures/fired_clay_fired_clay_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ceramic/textures/mortar_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/concrete_floor_damage_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/concrete_rough_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/concrete_wall_aged_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/concrete_wall_aged_scratched_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/concrete_wall_even_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/conrete_formed_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/conrete_polished_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/mortar_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/moss_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/precastconcrete_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/spongy_concrete_weatheredMoss_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/spongy_concrete_weathered_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Concrete/textures/stone_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/cashmere_wool_suiting_R_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/cotton_roughly_woven_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/felt_white_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/fine_woven_cotton_diff.jpg +- 
Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/pique_weave_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/silk_plain_chiffon_R_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/single_jersey_R_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/tweed_fibers_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/tweed_fuzz_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/twill_gabardine_R_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/velvet_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Fabric/textures/wool_knit_R_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/Large_Granite_Paving_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/aggregate_exposed_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/asphalt_fine_tarred_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/cobblestone_big_and_loose_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/cobblestone_medieval_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/cobblestone_tiny_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/gravel_track_ballast_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/ground_leaves_oak_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/hard_court_mono_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/leaves_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/leaves_grayscale_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/moss_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/mulch_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/paving_stones_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Ground/textures/rough_gravel_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/PU_split_leather_back_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/PU_split_leather_front_diff.jpg +- 
Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/pigmented_smooth_leather_back_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/pigmented_smooth_leather_front_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/pullUp_leather_back_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/pullUp_leather_front_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/semi_aniline_back_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/semi_aniline_front_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/suede_leather_back_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Leather/textures/suede_leather_front_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Masonry/textures/bricks_grey_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Masonry/textures/clinker_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Masonry/textures/sandstone_brick_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/blued_steel_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/brass_antique_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/bronze_antique_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/brushed_antique_copper_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/brushed_antique_copper_patina_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/copper_patina_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/iron_pitted_steel_heat_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/iron_pitted_steel_no_dents_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Metal/textures/pcb_copper_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Paint/textures/hammer_paint_multi_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Paper/textures/cardboard_new_01_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Paper/textures/cardboard_worn_01_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Paper/textures/lq_simple_cardboard_diff.jpg +- 
Assets/NVIDIA/Textures/vMaterials_2/Plaster/textures/mosaic_multi_color_stone_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Plaster/textures/plaster_rough_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Plaster/textures/plaster_wall_multi_r_ao_g_grunge_b_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Plastic/textures/pcb_prepreg_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Plastic/textures/pcb_solder_mask_grey_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Plastic/textures/styrofoam_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/ardesia_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/baltic_brown_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/baltic_red_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/basaltite_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/butterfly_verde_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/caledonia_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/castor_brown_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/devil_black_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/emerald_pearl_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/giallo_fiorito_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/golden_galaxy_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/granite_blackgalaxy_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/granite_blue_eyes_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/ice_kynite_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/imperial_red_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/morning_rose_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/natural_stone_black_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/padang_dark_grey_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/rosa_beta_diff.jpg +- 
Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/serizzo_gneis_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/star_galaxy_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/steel_grey_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/stone_mediterranian_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/tan_brown_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/terrazzo_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/tiger_skin_gold_diff.png +- Assets/NVIDIA/Textures/vMaterials_2/Stone/textures/volga_blue_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/bark_oak_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/beech_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/concrete_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/fineline_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/osb_wood_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/pine_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/poplar_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/wood_ash_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/wood_cork_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/wood_oak_mountain_diff.jpg +- Assets/NVIDIA/Textures/vMaterials_2/Wood/textures/wood_walnut_diff.jpg +- Assets/PolyHaven/Textures/brick/blue_floor_tiles_01_1k.png +- Assets/PolyHaven/Textures/brick/brick_4_1k.png +- Assets/PolyHaven/Textures/brick/brick_crosswalk_1k.png +- Assets/PolyHaven/Textures/brick/brick_floor_003_1k.png +- Assets/PolyHaven/Textures/brick/brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/brick_moss_001_1k.png +- Assets/PolyHaven/Textures/brick/brick_pavement_02_1k.png +- Assets/PolyHaven/Textures/brick/brick_pavement_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_001_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_003_1k.png +- 
Assets/PolyHaven/Textures/brick/brick_wall_005_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_006_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_02_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_04_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_07_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_08_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_09_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_10_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_11_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_12_1k.png +- Assets/PolyHaven/Textures/brick/brick_wall_13_1k.png +- Assets/PolyHaven/Textures/brick/broken_brick_wall_1k.png +- Assets/PolyHaven/Textures/brick/brown_brick_02_1k.png +- Assets/PolyHaven/Textures/brick/castle_brick_01_1k.png +- Assets/PolyHaven/Textures/brick/castle_brick_02_red_1k.png +- Assets/PolyHaven/Textures/brick/castle_brick_02_white_1k.png +- Assets/PolyHaven/Textures/brick/castle_brick_07_1k.png +- Assets/PolyHaven/Textures/brick/castle_brick_broken_06_1k.png +- Assets/PolyHaven/Textures/brick/church_bricks_02_1k.png +- Assets/PolyHaven/Textures/brick/church_bricks_03_1k.png +- Assets/PolyHaven/Textures/brick/concrete_brick_wall_001_1k.png +- Assets/PolyHaven/Textures/brick/cracked_concrete_wall_1k.png +- Assets/PolyHaven/Textures/brick/damaged_plaster_1k.png +- Assets/PolyHaven/Textures/brick/dark_brick_wall_1k.png +- Assets/PolyHaven/Textures/brick/double_brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/exterior_wall_cladding_1k.png +- Assets/PolyHaven/Textures/brick/factory_brick_1k.png +- Assets/PolyHaven/Textures/brick/floor_bricks_02_1k.png +- Assets/PolyHaven/Textures/brick/floor_klinkers_01_1k.png +- Assets/PolyHaven/Textures/brick/floor_klinkers_04_1k.png +- Assets/PolyHaven/Textures/brick/floor_pavement_1k.png +- Assets/PolyHaven/Textures/brick/herringbone_brick_02_1k.png +- Assets/PolyHaven/Textures/brick/herringbone_brick_1k.png +- 
Assets/PolyHaven/Textures/brick/herringbone_pavement_03_1k.png +- Assets/PolyHaven/Textures/brick/herringbone_pavement_1k.png +- Assets/PolyHaven/Textures/brick/large_red_bricks_1k.png +- Assets/PolyHaven/Textures/brick/large_sandstone_blocks_01_1k.png +- Assets/PolyHaven/Textures/brick/medieval_blocks_03_1k.png +- Assets/PolyHaven/Textures/brick/medieval_red_brick_1k.png +- Assets/PolyHaven/Textures/brick/mixed_brick_wall_1k.png +- Assets/PolyHaven/Textures/brick/mossy_brick_1k.png +- Assets/PolyHaven/Textures/brick/mossy_brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/painted_brick_1k.png +- Assets/PolyHaven/Textures/brick/painted_worn_brick_1k.png +- Assets/PolyHaven/Textures/brick/patterned_brick_floor_02_1k.png +- Assets/PolyHaven/Textures/brick/patterned_brick_wall_02_1k.png +- Assets/PolyHaven/Textures/brick/patterned_brick_wall_03_1k.png +- Assets/PolyHaven/Textures/brick/patterned_brick_wall_1k.png +- Assets/PolyHaven/Textures/brick/patterned_cobblestone_1k.png +- Assets/PolyHaven/Textures/brick/pavement_03_1k.png +- Assets/PolyHaven/Textures/brick/pavement_04_1k.png +- Assets/PolyHaven/Textures/brick/pavement_05_1k.png +- Assets/PolyHaven/Textures/brick/pavement_06_1k.png +- Assets/PolyHaven/Textures/brick/plaster_brick_01_1k.png +- Assets/PolyHaven/Textures/brick/plaster_brick_pattern_1k.png +- Assets/PolyHaven/Textures/brick/random_bricks_thick_1k.png +- Assets/PolyHaven/Textures/brick/recycled_brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/red_brick_03_1k.png +- Assets/PolyHaven/Textures/brick/red_brick_1k.png +- Assets/PolyHaven/Textures/brick/red_brick_plaster_patch_02_1k.png +- Assets/PolyHaven/Textures/brick/red_bricks_02_1k.png +- Assets/PolyHaven/Textures/brick/red_bricks_04_1k.png +- Assets/PolyHaven/Textures/brick/rough_block_wall_1k.png +- Assets/PolyHaven/Textures/brick/rough_plaster_03_1k.png +- Assets/PolyHaven/Textures/brick/rough_plaster_brick_02_1k.png +- Assets/PolyHaven/Textures/brick/rough_plaster_brick_1k.png +- 
Assets/PolyHaven/Textures/brick/sandstone_blocks_04_1k.png +- Assets/PolyHaven/Textures/brick/sandstone_blocks_05_1k.png +- Assets/PolyHaven/Textures/brick/sandstone_brick_wall_01_1k.png +- Assets/PolyHaven/Textures/brick/seaworn_sandstone_brick_1k.png +- Assets/PolyHaven/Textures/brick/seaworn_stone_tiles_1k.png +- Assets/PolyHaven/Textures/brick/short_bricks_floor_1k.png +- Assets/PolyHaven/Textures/brick/square_brick_paving_1k.png +- Assets/PolyHaven/Textures/brick/stenciled_brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/stone_brick_wall_001_1k.png +- Assets/PolyHaven/Textures/brick/t_brick_floor_002_1k.png +- Assets/PolyHaven/Textures/brick/t_concrete_wall_002_1k.png +- Assets/PolyHaven/Textures/brick/wall_bricks_plaster_1k.png +- Assets/PolyHaven/Textures/brick/white_bricks_1k.png +- Assets/PolyHaven/Textures/brick/white_sandstone_blocks_02_1k.png +- Assets/PolyHaven/Textures/brick/white_sandstone_bricks_03_1k.png +- Assets/PolyHaven/Textures/brick/white_sandstone_bricks_1k.png +- Assets/PolyHaven/Textures/brick/whitewashed_brick_1k.png +- Assets/PolyHaven/Textures/brick/worn_brick_floor_1k.png +- Assets/PolyHaven/Textures/brick/worn_brick_wall_1k.png +- Assets/PolyHaven/Textures/brick/yellow_brick_1k.png +- Assets/PolyHaven/Textures/brick/yellow_bricks_1k.png +- Assets/PolyHaven/Textures/concrete/anti_slip_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/asphalt_floor_1k.png +- Assets/PolyHaven/Textures/concrete/beige_wall_001_1k.png +- Assets/PolyHaven/Textures/concrete/beige_wall_002_1k.png +- Assets/PolyHaven/Textures/concrete/blue_plaster_weathered_1k.png +- Assets/PolyHaven/Textures/concrete/brushed_concrete_03_1k.png +- Assets/PolyHaven/Textures/concrete/brushed_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/brushed_concrete_2_1k.png +- Assets/PolyHaven/Textures/concrete/checkered_pavement_tiles_1k.png +- Assets/PolyHaven/Textures/concrete/clay_floor_001_1k.png +- Assets/PolyHaven/Textures/concrete/clay_plaster_1k.png +- 
Assets/PolyHaven/Textures/concrete/climbing_wall_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_block_wall_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_block_wall_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_debris_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_01_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_damaged_01_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_painted_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_worn_001_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_floor_worn_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_layers_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_layers_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_moss_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_panels_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_pavement_03_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_pavement_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_pavers_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_pavers_03_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_rock_path_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_slab_wall_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_tile_facade_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_tiles_02_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_tiles_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_001_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_003_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_004_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_005_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_006_1k.png +- Assets/PolyHaven/Textures/concrete/concrete_wall_007_1k.png +- 
Assets/PolyHaven/Textures/concrete/concrete_wall_008_1k.png +- Assets/PolyHaven/Textures/concrete/coral_fort_wall_01_1k.png +- Assets/PolyHaven/Textures/concrete/coral_fort_wall_02_1k.png +- Assets/PolyHaven/Textures/concrete/cracked_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/damaged_concrete_floor_02_1k.png +- Assets/PolyHaven/Textures/concrete/damaged_concrete_floor_03_1k.png +- Assets/PolyHaven/Textures/concrete/damaged_concrete_floor_1k.png +- Assets/PolyHaven/Textures/concrete/dirty_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/embedded_rock_floor_1k.png +- Assets/PolyHaven/Textures/concrete/garage_floor_1k.png +- Assets/PolyHaven/Textures/concrete/granular_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/grass_concrete_pavement_1k.png +- Assets/PolyHaven/Textures/concrete/gravel_concrete_02_1k.png +- Assets/PolyHaven/Textures/concrete/gravel_concrete_03_1k.png +- Assets/PolyHaven/Textures/concrete/gravel_concrete_04_1k.png +- Assets/PolyHaven/Textures/concrete/gravel_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/gravel_embedded_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/grey_plaster_02_1k.png +- Assets/PolyHaven/Textures/concrete/grey_plaster_03_1k.png +- Assets/PolyHaven/Textures/concrete/grey_plaster_1k.png +- Assets/PolyHaven/Textures/concrete/grooved_concrete_driveway_1k.png +- Assets/PolyHaven/Textures/concrete/hangar_concrete_floor_1k.png +- Assets/PolyHaven/Textures/concrete/herringbone_concrete_tile_1k.png +- Assets/PolyHaven/Textures/concrete/hexagonal_concrete_paving_1k.png +- Assets/PolyHaven/Textures/concrete/interlocking_concrete_pavers_1k.png +- Assets/PolyHaven/Textures/concrete/medieval_wall_01_1k.png +- Assets/PolyHaven/Textures/concrete/mossy_sandstone_1k.png +- Assets/PolyHaven/Textures/concrete/mossy_stone_wall_1k.png +- Assets/PolyHaven/Textures/concrete/painted_concrete_02_1k.png +- Assets/PolyHaven/Textures/concrete/painted_concrete_1k.png +- 
Assets/PolyHaven/Textures/concrete/painted_plaster_wall_1k.png +- Assets/PolyHaven/Textures/concrete/patterned_clay_plaster_1k.png +- Assets/PolyHaven/Textures/concrete/patterned_concrete_pavers_02_1k.png +- Assets/PolyHaven/Textures/concrete/patterned_concrete_pavers_1k.png +- Assets/PolyHaven/Textures/concrete/patterned_concrete_wall_1k.png +- Assets/PolyHaven/Textures/concrete/patterned_plaster_wall_1k.png +- Assets/PolyHaven/Textures/concrete/pavement_02_1k.png +- Assets/PolyHaven/Textures/concrete/pebble_embedded_pavement_1k.png +- Assets/PolyHaven/Textures/concrete/plaster_grey_04_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_stone_wall_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_wall_02_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_wall_03_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_wall_04_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_wall_05_1k.png +- Assets/PolyHaven/Textures/concrete/plastered_wall_1k.png +- Assets/PolyHaven/Textures/concrete/precast_concrete_wall_1k.png +- Assets/PolyHaven/Textures/concrete/precast_stone_paving_1k.png +- Assets/PolyHaven/Textures/concrete/preconcrete_wall_001_1k.png +- Assets/PolyHaven/Textures/concrete/preconcrete_wall_001_long_1k.png +- Assets/PolyHaven/Textures/concrete/rebar_reinforced_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/red_plaster_weathered_1k.png +- Assets/PolyHaven/Textures/concrete/red_sandstone_pavement_1k.png +- Assets/PolyHaven/Textures/concrete/ribbed_concrete_wall_1k.png +- Assets/PolyHaven/Textures/concrete/rock_embedded_concrete_wall_1k.png +- Assets/PolyHaven/Textures/concrete/rock_embedded_floor_1k.png +- Assets/PolyHaven/Textures/concrete/rock_wall_09_1k.png +- Assets/PolyHaven/Textures/concrete/rough_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/rough_plaster_brick_04_1k.png +- Assets/PolyHaven/Textures/concrete/rough_plaster_broken_1k.png +- Assets/PolyHaven/Textures/concrete/rough_plasterbrick_05_1k.png +- 
Assets/PolyHaven/Textures/concrete/scuffed_cement_1k.png +- Assets/PolyHaven/Textures/concrete/slate_driveway_1k.png +- Assets/PolyHaven/Textures/concrete/smooth_concrete_floor_1k.png +- Assets/PolyHaven/Textures/concrete/square_cobblestone_1k.png +- Assets/PolyHaven/Textures/concrete/square_concrete_pavers_1k.png +- Assets/PolyHaven/Textures/concrete/stone_embedded_concrete_1k.png +- Assets/PolyHaven/Textures/concrete/stone_wall_03_1k.png +- Assets/PolyHaven/Textures/concrete/white_plaster_02_1k.png +- Assets/PolyHaven/Textures/concrete/white_plaster_rough_01_1k.png +- Assets/PolyHaven/Textures/concrete/white_plaster_rough_02_1k.png +- Assets/PolyHaven/Textures/concrete/white_rough_plaster_1k.png +- Assets/PolyHaven/Textures/concrete/wood_inlaid_concrete_wall_1k.png +- Assets/PolyHaven/Textures/concrete/worn_mossy_plasterwall_1k.png +- Assets/PolyHaven/Textures/concrete/worn_patterned_pavers_1k.png +- Assets/PolyHaven/Textures/concrete/worn_plaster_wall_1k.png +- Assets/PolyHaven/Textures/concrete/yellow_plaster_02_1k.png +- Assets/PolyHaven/Textures/concrete/yellow_plaster_1k.png +- Assets/PolyHaven/Textures/concrete/yellow_stone_wall_1k.png +- Assets/PolyHaven/Textures/fabric/bi_stretch_1k.png +- Assets/PolyHaven/Textures/fabric/brown_leather_1k.png +- Assets/PolyHaven/Textures/fabric/caban_1k.png +- Assets/PolyHaven/Textures/fabric/cotton_jersey_1k.png +- Assets/PolyHaven/Textures/fabric/crepe_georgette_1k.png +- Assets/PolyHaven/Textures/fabric/crepe_satin_1k.png +- Assets/PolyHaven/Textures/fabric/curly_teddy_checkered_1k.png +- Assets/PolyHaven/Textures/fabric/curly_teddy_natural_1k.png +- Assets/PolyHaven/Textures/fabric/denim_fabric_03_1k.png +- Assets/PolyHaven/Textures/fabric/denim_fabric_04_1k.png +- Assets/PolyHaven/Textures/fabric/denim_fabric_05_1k.png +- Assets/PolyHaven/Textures/fabric/denim_fabric_06_1k.png +- Assets/PolyHaven/Textures/fabric/denim_fabric_1k.png +- Assets/PolyHaven/Textures/fabric/denmin_fabric_02_1k.png +- 
Assets/PolyHaven/Textures/fabric/dirty_carpet_1k.png +- Assets/PolyHaven/Textures/fabric/fabric_leather_01_1k.png +- Assets/PolyHaven/Textures/fabric/fabric_leather_02_1k.png +- Assets/PolyHaven/Textures/fabric/faux_fur_geometric_1k.png +- Assets/PolyHaven/Textures/fabric/floral_jacquard_1k.png +- Assets/PolyHaven/Textures/fabric/gingham_check_1k.png +- Assets/PolyHaven/Textures/fabric/hessian_230_1k.png +- Assets/PolyHaven/Textures/fabric/hessian_380_1k.png +- Assets/PolyHaven/Textures/fabric/jersey_melange_1k.png +- Assets/PolyHaven/Textures/fabric/jogging_melange_1k.png +- Assets/PolyHaven/Textures/fabric/knitted_fleece_1k.png +- Assets/PolyHaven/Textures/fabric/leather_white_1k.png +- Assets/PolyHaven/Textures/fabric/polar_fleece_1k.png +- Assets/PolyHaven/Textures/fabric/poly_wool_herringbone_1k.png +- Assets/PolyHaven/Textures/fabric/quatrefoil_jacquard_fabric_1k.png +- Assets/PolyHaven/Textures/fabric/ribbed_corduroy_1k.png +- Assets/PolyHaven/Textures/fabric/rough_linen_1k.png +- Assets/PolyHaven/Textures/fabric/scuba_suede_1k.png +- Assets/PolyHaven/Textures/fabric/stretch_poplin_1k.png +- Assets/PolyHaven/Textures/fabric/terlenka_1k.png +- Assets/PolyHaven/Textures/fabric/terry_cloth_1k.png +- Assets/PolyHaven/Textures/fabric/velour_velvet_1k.png +- Assets/PolyHaven/Textures/fabric/waffle_pique_cotton_1k.png +- Assets/PolyHaven/Textures/fabric/wool_boucle_1k.png +- Assets/PolyHaven/Textures/metal/blue_metal_plate_1k.png +- Assets/PolyHaven/Textures/metal/container_side_1k.png +- Assets/PolyHaven/Textures/metal/corrugated_iron_02_1k.png +- Assets/PolyHaven/Textures/metal/corrugated_iron_03_1k.png +- Assets/PolyHaven/Textures/metal/corrugated_iron_1k.png +- Assets/PolyHaven/Textures/metal/factory_wall_1k.png +- Assets/PolyHaven/Textures/metal/green_metal_rust_1k.png +- Assets/PolyHaven/Textures/metal/metal_grate_rusty_1k.png +- Assets/PolyHaven/Textures/metal/metal_plate_02_1k.png +- Assets/PolyHaven/Textures/metal/metal_plate_1k.png +- 
Assets/PolyHaven/Textures/metal/painted_metal_shutter_1k.png +- Assets/PolyHaven/Textures/metal/rust_coarse_01_1k.png +- Assets/PolyHaven/Textures/metal/rusted_shutter_1k.png +- Assets/PolyHaven/Textures/metal/rusty_corrugated_iron_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_02_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_03_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_04_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_05_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_grid_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_sheet_1k.png +- Assets/PolyHaven/Textures/metal/rusty_metal_shutter_1k.png +- Assets/PolyHaven/Textures/metal/rusty_painted_metal_1k.png +- Assets/PolyHaven/Textures/metal/worn_corrugated_iron_1k.png +- Assets/PolyHaven/Textures/metal/worn_shutter_1k.png +- Assets/PolyHaven/Textures/other/asbestos_sheet_02_1k.png +- Assets/PolyHaven/Textures/other/asbestos_sheet_1k.png +- Assets/PolyHaven/Textures/other/asphalt_03_1k.png +- Assets/PolyHaven/Textures/other/asphalt_04_1k.png +- Assets/PolyHaven/Textures/other/asphalt_track_1k.png +- Assets/PolyHaven/Textures/other/bitumen_1k.png +- Assets/PolyHaven/Textures/other/box_profile_metal_sheet_1k.png +- Assets/PolyHaven/Textures/other/ceiling_interior_1k.png +- Assets/PolyHaven/Textures/other/ceramic_roof_01_1k.png +- Assets/PolyHaven/Textures/other/clay_block_wall_1k.png +- Assets/PolyHaven/Textures/other/clay_roof_tiles_02_1k.png +- Assets/PolyHaven/Textures/other/clay_roof_tiles_03_1k.png +- Assets/PolyHaven/Textures/other/clay_roof_tiles_1k.png +- Assets/PolyHaven/Textures/other/climbing_wall_1k.png +- Assets/PolyHaven/Textures/other/climbing_wall_base_1k.png +- Assets/PolyHaven/Textures/other/concrete_pavement_02_1k.png +- Assets/PolyHaven/Textures/other/coral_stone_wall_1k.png +- Assets/PolyHaven/Textures/other/decrepit_wallpaper_1k.png +- Assets/PolyHaven/Textures/other/flour_1k.png +- 
Assets/PolyHaven/Textures/other/granite_tile_03_1k.png +- Assets/PolyHaven/Textures/other/granite_wall_1k.png +- Assets/PolyHaven/Textures/other/gravel_floor_1k.png +- Assets/PolyHaven/Textures/other/gravel_stones_1k.png +- Assets/PolyHaven/Textures/other/grey_roof_01_1k.png +- Assets/PolyHaven/Textures/other/grey_roof_tiles_02_1k.png +- Assets/PolyHaven/Textures/other/grey_roof_tiles_1k.png +- Assets/PolyHaven/Textures/other/grey_stone_path_1k.png +- Assets/PolyHaven/Textures/other/herringbone_pavement_02_1k.png +- Assets/PolyHaven/Textures/other/herringbone_pavement_04_1k.png +- Assets/PolyHaven/Textures/other/large_pebbles_1k.png +- Assets/PolyHaven/Textures/other/large_sandstone_blocks_1k.png +- Assets/PolyHaven/Textures/other/macro_flour_1k.png +- Assets/PolyHaven/Textures/other/old_sandstone_02_1k.png +- Assets/PolyHaven/Textures/other/patterned_clay_wall_1k.png +- Assets/PolyHaven/Textures/other/pebbles_1k.png +- Assets/PolyHaven/Textures/other/peeling_painted_wall_1k.png +- Assets/PolyHaven/Textures/other/polystyrene_1k.png +- Assets/PolyHaven/Textures/other/rectangular_facade_tiles_02_1k.png +- Assets/PolyHaven/Textures/other/rectangular_facade_tiles_1k.png +- Assets/PolyHaven/Textures/other/red_sandstone_wall_1k.png +- Assets/PolyHaven/Textures/other/red_slate_roof_tiles_01_1k.png +- Assets/PolyHaven/Textures/other/reed_roof_03_1k.png +- Assets/PolyHaven/Textures/other/reed_roof_04_1k.png +- Assets/PolyHaven/Textures/other/riet_01_1k.png +- Assets/PolyHaven/Textures/other/roof_07_1k.png +- Assets/PolyHaven/Textures/other/roof_09_1k.png +- Assets/PolyHaven/Textures/other/roof_3_1k.png +- Assets/PolyHaven/Textures/other/roof_slates_02_1k.png +- Assets/PolyHaven/Textures/other/roof_slates_03_1k.png +- Assets/PolyHaven/Textures/other/roof_tiles_14_1k.png +- Assets/PolyHaven/Textures/other/roof_tiles_1k.png +- Assets/PolyHaven/Textures/other/rounded_square_tiled_wall_1k.png +- Assets/PolyHaven/Textures/other/square_tiled_wall_1k.png +- 
Assets/PolyHaven/Textures/other/stone_block_wall_1k.png +- Assets/PolyHaven/Textures/other/stone_tile_wall_1k.png +- Assets/PolyHaven/Textures/other/stone_wall_05_1k.png +- Assets/PolyHaven/Textures/other/terrazzo_tiles_1k.png +- Assets/PolyHaven/Textures/other/thatch_roof_angled_1k.png +- Assets/PolyHaven/Textures/other/volcanic_rock_tiles_1k.png +- Assets/PolyHaven/Textures/rock/aerial_grass_rock_1k.png +- Assets/PolyHaven/Textures/rock/aerial_rocks_01_1k.png +- Assets/PolyHaven/Textures/rock/aerial_rocks_02_1k.png +- Assets/PolyHaven/Textures/rock/aerial_rocks_04_1k.png +- Assets/PolyHaven/Textures/rock/broken_wall_1k.png +- Assets/PolyHaven/Textures/rock/castle_wall_slates_1k.png +- Assets/PolyHaven/Textures/rock/castle_wall_varriation_1k.png +- Assets/PolyHaven/Textures/rock/clean_pebbles_1k.png +- Assets/PolyHaven/Textures/rock/cliff_side_1k.png +- Assets/PolyHaven/Textures/rock/coast_sand_03_1k.png +- Assets/PolyHaven/Textures/rock/coast_sand_05_1k.png +- Assets/PolyHaven/Textures/rock/coast_sand_rocks_02_1k.png +- Assets/PolyHaven/Textures/rock/coral_fort_wall_03_1k.png +- Assets/PolyHaven/Textures/rock/coral_gravel_1k.png +- Assets/PolyHaven/Textures/rock/coral_ground_02_1k.png +- Assets/PolyHaven/Textures/rock/defense_wall_02_1k.png +- Assets/PolyHaven/Textures/rock/defense_wall_1k.png +- Assets/PolyHaven/Textures/rock/dry_riverbed_rock_1k.png +- Assets/PolyHaven/Textures/rock/forest_ground_04_1k.png +- Assets/PolyHaven/Textures/rock/ganges_river_pebbles_1k.png +- Assets/PolyHaven/Textures/rock/gravel_road_1k.png +- Assets/PolyHaven/Textures/rock/gray_rocks_1k.png +- Assets/PolyHaven/Textures/rock/japanese_stone_wall_1k.png +- Assets/PolyHaven/Textures/rock/lichen_rock_1k.png +- Assets/PolyHaven/Textures/rock/medieval_blocks_02_1k.png +- Assets/PolyHaven/Textures/rock/medieval_blocks_05_1k.png +- Assets/PolyHaven/Textures/rock/medieval_blocks_06_1k.png +- Assets/PolyHaven/Textures/rock/medieval_wall_02_1k.png +- 
Assets/PolyHaven/Textures/rock/mixed_rock_tiles_1k.png +- Assets/PolyHaven/Textures/rock/monastery_stone_floor_1k.png +- Assets/PolyHaven/Textures/rock/mossy_rock_1k.png +- Assets/PolyHaven/Textures/rock/quarry_wall_02_1k.png +- Assets/PolyHaven/Textures/rock/quarry_wall_1k.png +- Assets/PolyHaven/Textures/rock/rabdentse_ruins_wall_1k.png +- Assets/PolyHaven/Textures/rock/red_laterite_soil_stones_1k.png +- Assets/PolyHaven/Textures/rock/river_small_rocks_1k.png +- Assets/PolyHaven/Textures/rock/rock_01_1k.png +- Assets/PolyHaven/Textures/rock/rock_04_1k.png +- Assets/PolyHaven/Textures/rock/rock_05_1k.png +- Assets/PolyHaven/Textures/rock/rock_06_1k.png +- Assets/PolyHaven/Textures/rock/rock_08_1k.png +- Assets/PolyHaven/Textures/rock/rock_2_1k.png +- Assets/PolyHaven/Textures/rock/rock_3_1k.png +- Assets/PolyHaven/Textures/rock/rock_boulder_cracked_1k.png +- Assets/PolyHaven/Textures/rock/rock_boulder_dry_1k.png +- Assets/PolyHaven/Textures/rock/rock_embedded_tiles_1k.png +- Assets/PolyHaven/Textures/rock/rock_face_03_1k.png +- Assets/PolyHaven/Textures/rock/rock_face_1k.png +- Assets/PolyHaven/Textures/rock/rock_ground_02_1k.png +- Assets/PolyHaven/Textures/rock/rock_ground_1k.png +- Assets/PolyHaven/Textures/rock/rock_pitted_mossy_1k.png +- Assets/PolyHaven/Textures/rock/rock_surface_1k.png +- Assets/PolyHaven/Textures/rock/rock_tile_floor_02_1k.png +- Assets/PolyHaven/Textures/rock/rock_tile_floor_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_02_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_03_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_04_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_05_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_06_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_07_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_08_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_10_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_11_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_12_1k.png +- 
Assets/PolyHaven/Textures/rock/rock_wall_13_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_14_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_15_1k.png +- Assets/PolyHaven/Textures/rock/rock_wall_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_01_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_02_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_03_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_04_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_05_1k.png +- Assets/PolyHaven/Textures/rock/rocks_ground_06_1k.png +- Assets/PolyHaven/Textures/rock/rocky_terrain_03_1k.png +- Assets/PolyHaven/Textures/rock/rocky_terrain_1k.png +- Assets/PolyHaven/Textures/rock/rocky_trail_1k.png +- Assets/PolyHaven/Textures/rock/rustic_stone_wall_02_1k.png +- Assets/PolyHaven/Textures/rock/rustic_stone_wall_1k.png +- Assets/PolyHaven/Textures/rock/seaside_rock_1k.png +- Assets/PolyHaven/Textures/rock/slab_tiles_1k.png +- Assets/PolyHaven/Textures/rock/slate_floor_02_1k.png +- Assets/PolyHaven/Textures/rock/stacked_stone_wall_1k.png +- Assets/PolyHaven/Textures/rock/stone_floor_1k.png +- Assets/PolyHaven/Textures/rock/stone_pathway_1k.png +- Assets/PolyHaven/Textures/rock/stone_tiles_02_1k.png +- Assets/PolyHaven/Textures/rock/stone_tiles_03_1k.png +- Assets/PolyHaven/Textures/rock/stone_wall_02_1k.png +- Assets/PolyHaven/Textures/rock/stone_wall_04_1k.png +- Assets/PolyHaven/Textures/rock/stone_wall_1k.png +- Assets/PolyHaven/Textures/rock/terrain_red_01_1k.png +- Assets/PolyHaven/Textures/rock/tiger_rock_1k.png +- Assets/PolyHaven/Textures/rock/volcanic_herringbone_01_1k.png +- Assets/PolyHaven/Textures/rock/wood_inlaid_stone_wall_1k.png +- Assets/PolyHaven/Textures/rock/wood_stone_pathway_1k.png +- Assets/PolyHaven/Textures/rock/worn_rock_natural_01_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_asphalt_01_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_beach_01_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_beach_02_1k.png +- 
Assets/PolyHaven/Textures/terrain/aerial_beach_03_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_ground_rock_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_mud_1_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_sand_1k.png +- Assets/PolyHaven/Textures/terrain/aerial_wood_snips_1k.png +- Assets/PolyHaven/Textures/terrain/asphalt_02_1k.png +- Assets/PolyHaven/Textures/terrain/asphalt_snow_1k.png +- Assets/PolyHaven/Textures/terrain/baseball_playground_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_02_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_03_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_dry_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_leaves_01_1k.png +- Assets/PolyHaven/Textures/terrain/brown_mud_rocks_01_1k.png +- Assets/PolyHaven/Textures/terrain/burned_ground_01_1k.png +- Assets/PolyHaven/Textures/terrain/coast_land_rocks_01_1k.png +- Assets/PolyHaven/Textures/terrain/coast_sand_01_1k.png +- Assets/PolyHaven/Textures/terrain/coast_sand_02_1k.png +- Assets/PolyHaven/Textures/terrain/coast_sand_04_1k.png +- Assets/PolyHaven/Textures/terrain/coral_mud_01_1k.png +- Assets/PolyHaven/Textures/terrain/cracked_red_ground_1k.png +- Assets/PolyHaven/Textures/terrain/damp_sand_1k.png +- Assets/PolyHaven/Textures/terrain/dirt_1k.png +- Assets/PolyHaven/Textures/terrain/dirt_aerial_02_1k.png +- Assets/PolyHaven/Textures/terrain/dirt_aerial_03_1k.png +- Assets/PolyHaven/Textures/terrain/dirt_floor_1k.png +- Assets/PolyHaven/Textures/terrain/dry_decay_leaves_1k.png +- Assets/PolyHaven/Textures/terrain/dry_ground_01_1k.png +- Assets/PolyHaven/Textures/terrain/dry_ground_rocks_1k.png +- Assets/PolyHaven/Textures/terrain/dry_mud_field_001_1k.png +- Assets/PolyHaven/Textures/terrain/excavated_soil_wall_1k.png +- Assets/PolyHaven/Textures/terrain/floor_pebbles_01_1k.png +- Assets/PolyHaven/Textures/terrain/flower_scattered_dirt_1k.png +- 
Assets/PolyHaven/Textures/terrain/forest_floor_1k.png +- Assets/PolyHaven/Textures/terrain/forest_leaves_02_1k.png +- Assets/PolyHaven/Textures/terrain/forest_leaves_03_1k.png +- Assets/PolyHaven/Textures/terrain/forest_leaves_04_1k.png +- Assets/PolyHaven/Textures/terrain/forrest_ground_01_1k.png +- Assets/PolyHaven/Textures/terrain/forrest_ground_03_1k.png +- Assets/PolyHaven/Textures/terrain/forrest_sand_01_1k.png +- Assets/PolyHaven/Textures/terrain/grass_path_2_1k.png +- Assets/PolyHaven/Textures/terrain/grass_path_3_1k.png +- Assets/PolyHaven/Textures/terrain/grassy_cobblestone_1k.png +- Assets/PolyHaven/Textures/terrain/gravel_ground_01_1k.png +- Assets/PolyHaven/Textures/terrain/gravelly_sand_1k.png +- Assets/PolyHaven/Textures/terrain/ground_grey_1k.png +- Assets/PolyHaven/Textures/terrain/leaves_forest_ground_1k.png +- Assets/PolyHaven/Textures/terrain/low_tide_rocks_1k.png +- Assets/PolyHaven/Textures/terrain/moon_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_02_1k.png +- Assets/PolyHaven/Textures/terrain/moon_03_1k.png +- Assets/PolyHaven/Textures/terrain/moon_04_1k.png +- Assets/PolyHaven/Textures/terrain/moon_dusted_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_dusted_02_1k.png +- Assets/PolyHaven/Textures/terrain/moon_dusted_03_1k.png +- Assets/PolyHaven/Textures/terrain/moon_dusted_04_1k.png +- Assets/PolyHaven/Textures/terrain/moon_dusted_05_1k.png +- Assets/PolyHaven/Textures/terrain/moon_flat_macro_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_flat_macro_02_1k.png +- Assets/PolyHaven/Textures/terrain/moon_footprints_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_footprints_02_1k.png +- Assets/PolyHaven/Textures/terrain/moon_macro_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_meteor_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_meteor_02_1k.png +- Assets/PolyHaven/Textures/terrain/moon_track_01_1k.png +- Assets/PolyHaven/Textures/terrain/moon_track_02_1k.png +- 
Assets/PolyHaven/Textures/terrain/moon_track_03_1k.png +- Assets/PolyHaven/Textures/terrain/moon_track_04_1k.png +- Assets/PolyHaven/Textures/terrain/mud_cracked_dry_03_1k.png +- Assets/PolyHaven/Textures/terrain/mud_cracked_dry_riverbed_002_1k.png +- Assets/PolyHaven/Textures/terrain/park_dirt_1k.png +- Assets/PolyHaven/Textures/terrain/park_sand_1k.png +- Assets/PolyHaven/Textures/terrain/pebble_ground_01_1k.png +- Assets/PolyHaven/Textures/terrain/playground_sand_1k.png +- Assets/PolyHaven/Textures/terrain/red_dirt_mud_01_1k.png +- Assets/PolyHaven/Textures/terrain/red_mud_stones_1k.png +- Assets/PolyHaven/Textures/terrain/red_sand_1k.png +- Assets/PolyHaven/Textures/terrain/rocks_ground_08_1k.png +- Assets/PolyHaven/Textures/terrain/rocks_ground_09_1k.png +- Assets/PolyHaven/Textures/terrain/rocky_terrain_02_1k.png +- Assets/PolyHaven/Textures/terrain/rocky_trail_02_1k.png +- Assets/PolyHaven/Textures/terrain/sand_01_1k.png +- Assets/PolyHaven/Textures/terrain/sand_02_1k.png +- Assets/PolyHaven/Textures/terrain/sand_03_1k.png +- Assets/PolyHaven/Textures/terrain/sandstone_cracks_1k.png +- Assets/PolyHaven/Textures/terrain/sandy_gravel_02_1k.png +- Assets/PolyHaven/Textures/terrain/sandy_gravel_1k.png +- Assets/PolyHaven/Textures/terrain/shell_floor_01_1k.png +- Assets/PolyHaven/Textures/terrain/snow_01_1k.png +- Assets/PolyHaven/Textures/terrain/snow_02_1k.png +- Assets/PolyHaven/Textures/terrain/snow_03_1k.png +- Assets/PolyHaven/Textures/terrain/snow_04_1k.png +- Assets/PolyHaven/Textures/terrain/snow_05_1k.png +- Assets/PolyHaven/Textures/terrain/snow_field_aerial_1k.png +- Assets/PolyHaven/Textures/terrain/snow_floor_1k.png +- Assets/PolyHaven/Textures/terrain/wood_chip_path_1k.png +- Assets/PolyHaven/Textures/tile/anti_skid_tiles_1k.png +- Assets/PolyHaven/Textures/tile/asphalt_01_1k.png +- Assets/PolyHaven/Textures/tile/asphalt_05_1k.png +- Assets/PolyHaven/Textures/tile/asphalt_06_1k.png +- Assets/PolyHaven/Textures/tile/asphalt_pit_lane_1k.png +- 
Assets/PolyHaven/Textures/tile/bicolour_gravel_1k.png +- Assets/PolyHaven/Textures/tile/brick_floor_02_1k.png +- Assets/PolyHaven/Textures/tile/brick_villa_floor_1k.png +- Assets/PolyHaven/Textures/tile/brown_floor_tiles_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_01_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_02_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_03_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_05_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_color_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_embedded_asphalt_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_001_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_01_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_02_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_04_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_05_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_06_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_07_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_08_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_floor_13_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_large_01_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_pavement_1k.png +- Assets/PolyHaven/Textures/tile/cobblestone_square_1k.png +- Assets/PolyHaven/Textures/tile/concrete_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/concrete_pavers_1k.png +- Assets/PolyHaven/Textures/tile/dark_wooden_planks_1k.png +- Assets/PolyHaven/Textures/tile/diagonal_parquet_1k.png +- Assets/PolyHaven/Textures/tile/dirty_tiles_1k.png +- Assets/PolyHaven/Textures/tile/floor_pattern_01_1k.png +- Assets/PolyHaven/Textures/tile/floor_pattern_02_1k.png +- Assets/PolyHaven/Textures/tile/floor_tiles_02_1k.png +- Assets/PolyHaven/Textures/tile/floor_tiles_04_1k.png +- Assets/PolyHaven/Textures/tile/floor_tiles_06_1k.png +- 
Assets/PolyHaven/Textures/tile/floor_tiles_08_1k.png +- Assets/PolyHaven/Textures/tile/floor_tiles_09_1k.png +- Assets/PolyHaven/Textures/tile/flower_scattered_asphalt_1k.png +- Assets/PolyHaven/Textures/tile/flower_scattered_gravel_1k.png +- Assets/PolyHaven/Textures/tile/granite_tile_02_1k.png +- Assets/PolyHaven/Textures/tile/granite_tile_04_1k.png +- Assets/PolyHaven/Textures/tile/granite_tile_1k.png +- Assets/PolyHaven/Textures/tile/gravel_1k.png +- Assets/PolyHaven/Textures/tile/gravel_floor_02_1k.png +- Assets/PolyHaven/Textures/tile/gravel_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/grey_cartago_01_1k.png +- Assets/PolyHaven/Textures/tile/grey_cartago_02_1k.png +- Assets/PolyHaven/Textures/tile/grey_cartago_03_1k.png +- Assets/PolyHaven/Textures/tile/grey_tiles_1k.png +- Assets/PolyHaven/Textures/tile/herringbone_parquet_1k.png +- Assets/PolyHaven/Textures/tile/interior_tiles_1k.png +- Assets/PolyHaven/Textures/tile/laminate_floor_02_1k.png +- Assets/PolyHaven/Textures/tile/laminate_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/laminate_floor_1k.png +- Assets/PolyHaven/Textures/tile/large_floor_tiles_02_1k.png +- Assets/PolyHaven/Textures/tile/large_grey_tiles_1k.png +- Assets/PolyHaven/Textures/tile/large_square_pattern_01_1k.png +- Assets/PolyHaven/Textures/tile/leafy_grass_1k.png +- Assets/PolyHaven/Textures/tile/linoleum_brown_1k.png +- Assets/PolyHaven/Textures/tile/long_white_tiles_1k.png +- Assets/PolyHaven/Textures/tile/marble_01_1k.png +- Assets/PolyHaven/Textures/tile/marble_mosaic_tiles_1k.png +- Assets/PolyHaven/Textures/tile/marble_tiles_1k.png +- Assets/PolyHaven/Textures/tile/mixed_stone_tiles_1k.png +- Assets/PolyHaven/Textures/tile/moss_wood_1k.png +- Assets/PolyHaven/Textures/tile/mossy_cobblestone_1k.png +- Assets/PolyHaven/Textures/tile/mud_forest_1k.png +- Assets/PolyHaven/Textures/tile/old_linoleum_flooring_01_1k.png +- Assets/PolyHaven/Textures/tile/old_planks_02_1k.png +- Assets/PolyHaven/Textures/tile/old_wood_floor_1k.png 
+- Assets/PolyHaven/Textures/tile/overgrown_concrete_pavers_1k.png +- Assets/PolyHaven/Textures/tile/patio_tiles_1k.png +- Assets/PolyHaven/Textures/tile/patterned_brick_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/patterned_brick_floor_1k.png +- Assets/PolyHaven/Textures/tile/patterned_cobblestone_02_1k.png +- Assets/PolyHaven/Textures/tile/patterned_concrete_pavers_03_1k.png +- Assets/PolyHaven/Textures/tile/patterned_paving_02_1k.png +- Assets/PolyHaven/Textures/tile/patterned_paving_1k.png +- Assets/PolyHaven/Textures/tile/patterned_slate_tiles_1k.png +- Assets/PolyHaven/Textures/tile/pavement_01_1k.png +- Assets/PolyHaven/Textures/tile/pebble_cemented_floor_1k.png +- Assets/PolyHaven/Textures/tile/pebble_embedded_concrete_02_1k.png +- Assets/PolyHaven/Textures/tile/pebble_embedded_concrete_1k.png +- Assets/PolyHaven/Textures/tile/plank_flooring_02_1k.png +- Assets/PolyHaven/Textures/tile/plank_flooring_03_1k.png +- Assets/PolyHaven/Textures/tile/plank_flooring_04_1k.png +- Assets/PolyHaven/Textures/tile/plank_flooring_1k.png +- Assets/PolyHaven/Textures/tile/raked_dirt_1k.png +- Assets/PolyHaven/Textures/tile/rectangular_parquet_1k.png +- Assets/PolyHaven/Textures/tile/rectangular_paving_1k.png +- Assets/PolyHaven/Textures/tile/red_brick_pavers_1k.png +- Assets/PolyHaven/Textures/tile/rock_path_1k.png +- Assets/PolyHaven/Textures/tile/rocky_gravel_1k.png +- Assets/PolyHaven/Textures/tile/roots_1k.png +- Assets/PolyHaven/Textures/tile/rubber_tiles_1k.png +- Assets/PolyHaven/Textures/tile/rubberized_track_1k.png +- Assets/PolyHaven/Textures/tile/rubble_1k.png +- Assets/PolyHaven/Textures/tile/running_track_1k.png +- Assets/PolyHaven/Textures/tile/sandstone_blocks_08_1k.png +- Assets/PolyHaven/Textures/tile/slate_floor_03_1k.png +- Assets/PolyHaven/Textures/tile/slate_floor_1k.png +- Assets/PolyHaven/Textures/tile/sparse_grass_1k.png +- Assets/PolyHaven/Textures/tile/square_floor_1k.png +- Assets/PolyHaven/Textures/tile/square_floor_patern_01_1k.png +- 
Assets/PolyHaven/Textures/tile/square_tiles_02_1k.png +- Assets/PolyHaven/Textures/tile/square_tiles_03_1k.png +- Assets/PolyHaven/Textures/tile/square_tiles_1k.png +- Assets/PolyHaven/Textures/tile/stone_embedded_tiles_1k.png +- Assets/PolyHaven/Textures/tile/stone_pathway_02_1k.png +- Assets/PolyHaven/Textures/tile/stone_pavers_1k.png +- Assets/PolyHaven/Textures/tile/stone_tiles_1k.png +- Assets/PolyHaven/Textures/tile/stony_dirt_path_1k.png +- Assets/PolyHaven/Textures/tile/tatami_mat_1k.png +- Assets/PolyHaven/Textures/tile/tiled_floor_001_1k.png +- Assets/PolyHaven/Textures/tile/weathered_brown_planks_1k.png +- Assets/PolyHaven/Textures/tile/weathered_planks_1k.png +- Assets/PolyHaven/Textures/tile/winter_leaves_1k.png +- Assets/PolyHaven/Textures/tile/withered_grass_1k.png +- Assets/PolyHaven/Textures/tile/wood_cabinet_worn_long_1k.png +- Assets/PolyHaven/Textures/tile/wood_floor_1k.png +- Assets/PolyHaven/Textures/tile/wood_floor_deck_1k.png +- Assets/PolyHaven/Textures/tile/wood_floor_worn_1k.png +- Assets/PolyHaven/Textures/tile/wood_planks_1k.png +- Assets/PolyHaven/Textures/tile/wood_planks_grey_1k.png +- Assets/PolyHaven/Textures/tile/worn_asphalt_1k.png +- Assets/PolyHaven/Textures/tile/worn_tile_floor_1k.png +- Assets/PolyHaven/Textures/tile/yellow_flower_mulch_1k.png +- Assets/PolyHaven/Textures/wood/bark_bluegum_1k.png +- Assets/PolyHaven/Textures/wood/bark_brown_01_1k.png +- Assets/PolyHaven/Textures/wood/bark_brown_02_1k.png +- Assets/PolyHaven/Textures/wood/bark_platanus_1k.png +- Assets/PolyHaven/Textures/wood/bark_willow_02_1k.png +- Assets/PolyHaven/Textures/wood/bark_willow_1k.png +- Assets/PolyHaven/Textures/wood/beam_wall_01_1k.png +- Assets/PolyHaven/Textures/wood/black_painted_planks_1k.png +- Assets/PolyHaven/Textures/wood/blue_painted_planks_1k.png +- Assets/PolyHaven/Textures/wood/brown_planks_03_1k.png +- Assets/PolyHaven/Textures/wood/brown_planks_04_1k.png +- Assets/PolyHaven/Textures/wood/brown_planks_05_1k.png +- 
Assets/PolyHaven/Textures/wood/brown_planks_07_1k.png +- Assets/PolyHaven/Textures/wood/brown_planks_08_1k.png +- Assets/PolyHaven/Textures/wood/brown_planks_09_1k.png +- Assets/PolyHaven/Textures/wood/chinese_cedar_bark_1k.png +- Assets/PolyHaven/Textures/wood/chinese_hackberry_bark_1k.png +- Assets/PolyHaven/Textures/wood/dark_planks_1k.png +- Assets/PolyHaven/Textures/wood/dark_wood_1k.png +- Assets/PolyHaven/Textures/wood/distressed_painted_planks_1k.png +- Assets/PolyHaven/Textures/wood/fine_grained_wood_1k.png +- Assets/PolyHaven/Textures/wood/green_rough_planks_1k.png +- Assets/PolyHaven/Textures/wood/japanese_camphor_bark_1k.png +- Assets/PolyHaven/Textures/wood/japanese_cedar_bark_1k.png +- Assets/PolyHaven/Textures/wood/japanese_hackberry_bark_1k.png +- Assets/PolyHaven/Textures/wood/japanese_sycamore_1k.png +- Assets/PolyHaven/Textures/wood/japanese_zelkova_bark_1k.png +- Assets/PolyHaven/Textures/wood/kitchen_wood_1k.png +- Assets/PolyHaven/Textures/wood/knotted_pine_bark_1k.png +- Assets/PolyHaven/Textures/wood/medieval_wood_1k.png +- Assets/PolyHaven/Textures/wood/metasequoia_bark_1k.png +- Assets/PolyHaven/Textures/wood/oak_veneer_01_1k.png +- Assets/PolyHaven/Textures/wood/palm_bark_1k.png +- Assets/PolyHaven/Textures/wood/palm_tree_bark_1k.png +- Assets/PolyHaven/Textures/wood/pine_bark_1k.png +- Assets/PolyHaven/Textures/wood/planks_brown_10_1k.png +- Assets/PolyHaven/Textures/wood/plywood_1k.png +- Assets/PolyHaven/Textures/wood/raw_plank_wall_1k.png +- Assets/PolyHaven/Textures/wood/rosewood_veneer1_1k.png +- Assets/PolyHaven/Textures/wood/rough_pine_door_1k.png +- Assets/PolyHaven/Textures/wood/rough_wood_1k.png +- Assets/PolyHaven/Textures/wood/sakura_bark_1k.png +- Assets/PolyHaven/Textures/wood/synthetic_wood_1k.png +- Assets/PolyHaven/Textures/wood/tree_bark_03_1k.png +- Assets/PolyHaven/Textures/wood/trident_maple_bark_1k.png +- Assets/PolyHaven/Textures/wood/wood_chips_1k.png +- 
Assets/PolyHaven/Textures/wood/wood_peeling_paint_weathered_1k.png +- Assets/PolyHaven/Textures/wood/wood_plank_wall_1k.png +- Assets/PolyHaven/Textures/wood/wood_planks_dirt_1k.png +- Assets/PolyHaven/Textures/wood/wood_shutter_1k.png +- Assets/PolyHaven/Textures/wood/wood_table_001_1k.png +- Assets/PolyHaven/Textures/wood/wood_table_1k.png +- Assets/PolyHaven/Textures/wood/wood_table_worn_1k.png +- Assets/PolyHaven/Textures/wood/wood_trunk_wall_1k.png +- Assets/PolyHaven/Textures/wood/wooden_garage_door_1k.png +- Assets/PolyHaven/Textures/wood/wooden_gate_1k.png +- Assets/PolyHaven/Textures/wood/wooden_planks_1k.png +- Assets/PolyHaven/Textures/wood/wooden_rough_planks_1k.png +- Assets/PolyHaven/Textures/wood/worn_planks_1k.png diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths_ood.yaml b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths_ood.yaml new file mode 100644 index 00000000..d9e9ea65 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/resources/texture_paths_ood.yaml @@ -0,0 +1,172 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Out-of-Distribution (OOD) Texture paths for domain randomization testing +# +# These textures are from AmbientCG (https://ambientcg.com) - a DIFFERENT source +# than the training textures (which use Poly Haven and NVIDIA vMaterials_2). 
+# +# Total: 136 textures across diverse categories +# License: CC0 (Public Domain) + +# AmbientCG textures (cloud - Backblaze B2) +cloud: + # Ice/Snow - Very different from typical indoor surfaces (10 textures) + - Assets/AmbientCG/Textures/Ice001.jpg + - Assets/AmbientCG/Textures/Ice002.jpg + - Assets/AmbientCG/Textures/Ice003.jpg + - Assets/AmbientCG/Textures/Ice004.jpg + - Assets/AmbientCG/Textures/Snow001.jpg + - Assets/AmbientCG/Textures/Snow002.jpg + - Assets/AmbientCG/Textures/Snow003.jpg + - Assets/AmbientCG/Textures/Snow004.jpg + - Assets/AmbientCG/Textures/Snow005.jpg + - Assets/AmbientCG/Textures/Snow006.jpg + + # Lava - Unusual volcanic textures (5 textures) + - Assets/AmbientCG/Textures/Lava001.jpg + - Assets/AmbientCG/Textures/Lava002.jpg + - Assets/AmbientCG/Textures/Lava003.jpg + - Assets/AmbientCG/Textures/Lava004.jpg + - Assets/AmbientCG/Textures/Lava005.jpg + + # Grass - Outdoor natural surfaces (5 textures) + - Assets/AmbientCG/Textures/Grass001.jpg + - Assets/AmbientCG/Textures/Grass002.jpg + - Assets/AmbientCG/Textures/Grass003.jpg + - Assets/AmbientCG/Textures/Grass004.jpg + - Assets/AmbientCG/Textures/Grass005.jpg + + # Ground - Various terrain textures (22 textures) + - Assets/AmbientCG/Textures/Ground001.jpg + - Assets/AmbientCG/Textures/Ground002.jpg + - Assets/AmbientCG/Textures/Ground003.jpg + - Assets/AmbientCG/Textures/Ground004.jpg + - Assets/AmbientCG/Textures/Ground005.jpg + - Assets/AmbientCG/Textures/Ground007.jpg + - Assets/AmbientCG/Textures/Ground008.jpg + - Assets/AmbientCG/Textures/Ground010.jpg + - Assets/AmbientCG/Textures/Ground012.jpg + - Assets/AmbientCG/Textures/Ground015.jpg + - Assets/AmbientCG/Textures/Ground020.jpg + - Assets/AmbientCG/Textures/Ground022.jpg + - Assets/AmbientCG/Textures/Ground025.jpg + - Assets/AmbientCG/Textures/Ground030.jpg + - Assets/AmbientCG/Textures/Ground035.jpg + - Assets/AmbientCG/Textures/Ground037.jpg + - Assets/AmbientCG/Textures/Ground040.jpg + - 
Assets/AmbientCG/Textures/Ground042.jpg + - Assets/AmbientCG/Textures/Ground044.jpg + - Assets/AmbientCG/Textures/Ground048.jpg + - Assets/AmbientCG/Textures/Ground050.jpg + - Assets/AmbientCG/Textures/Ground054.jpg + + # Rock - Different rock patterns from AmbientCG (20 textures) + - Assets/AmbientCG/Textures/Rock001.jpg + - Assets/AmbientCG/Textures/Rock002.jpg + - Assets/AmbientCG/Textures/Rock003.jpg + - Assets/AmbientCG/Textures/Rock004.jpg + - Assets/AmbientCG/Textures/Rock005.jpg + - Assets/AmbientCG/Textures/Rock006.jpg + - Assets/AmbientCG/Textures/Rock007.jpg + - Assets/AmbientCG/Textures/Rock008.jpg + - Assets/AmbientCG/Textures/Rock010.jpg + - Assets/AmbientCG/Textures/Rock012.jpg + - Assets/AmbientCG/Textures/Rock014.jpg + - Assets/AmbientCG/Textures/Rock016.jpg + - Assets/AmbientCG/Textures/Rock018.jpg + - Assets/AmbientCG/Textures/Rock020.jpg + - Assets/AmbientCG/Textures/Rock022.jpg + - Assets/AmbientCG/Textures/Rock024.jpg + - Assets/AmbientCG/Textures/Rock026.jpg + - Assets/AmbientCG/Textures/Rock028.jpg + - Assets/AmbientCG/Textures/Rock030.jpg + - Assets/AmbientCG/Textures/Rock032.jpg + + # Fabric - Different fabric patterns (8 textures) + - Assets/AmbientCG/Textures/Fabric001.jpg + - Assets/AmbientCG/Textures/Fabric002.jpg + - Assets/AmbientCG/Textures/Fabric003.jpg + - Assets/AmbientCG/Textures/Fabric004.jpg + - Assets/AmbientCG/Textures/Fabric005.jpg + - Assets/AmbientCG/Textures/Fabric006.jpg + - Assets/AmbientCG/Textures/Fabric007.jpg + - Assets/AmbientCG/Textures/Fabric008.jpg + + # Metal - Different metal finishes (25 textures) + - Assets/AmbientCG/Textures/Metal001.jpg + - Assets/AmbientCG/Textures/Metal002.jpg + - Assets/AmbientCG/Textures/Metal003.jpg + - Assets/AmbientCG/Textures/Metal004.jpg + - Assets/AmbientCG/Textures/Metal005.jpg + - Assets/AmbientCG/Textures/Metal006.jpg + - Assets/AmbientCG/Textures/Metal007.jpg + - Assets/AmbientCG/Textures/Metal008.jpg + - Assets/AmbientCG/Textures/Metal009.jpg + - 
Assets/AmbientCG/Textures/Metal010.jpg + - Assets/AmbientCG/Textures/Metal012.jpg + - Assets/AmbientCG/Textures/Metal014.jpg + - Assets/AmbientCG/Textures/Metal016.jpg + - Assets/AmbientCG/Textures/Metal018.jpg + - Assets/AmbientCG/Textures/Metal020.jpg + - Assets/AmbientCG/Textures/Metal022.jpg + - Assets/AmbientCG/Textures/Metal024.jpg + - Assets/AmbientCG/Textures/Metal026.jpg + - Assets/AmbientCG/Textures/Metal028.jpg + - Assets/AmbientCG/Textures/Metal030.jpg + - Assets/AmbientCG/Textures/Metal032.jpg + - Assets/AmbientCG/Textures/Metal034.jpg + - Assets/AmbientCG/Textures/Metal036.jpg + - Assets/AmbientCG/Textures/Metal038.jpg + - Assets/AmbientCG/Textures/Metal040.jpg + + # Wood - Different wood types (10 textures) + - Assets/AmbientCG/Textures/Wood001.jpg + - Assets/AmbientCG/Textures/Wood002.jpg + - Assets/AmbientCG/Textures/Wood003.jpg + - Assets/AmbientCG/Textures/Wood004.jpg + - Assets/AmbientCG/Textures/Wood005.jpg + - Assets/AmbientCG/Textures/Wood006.jpg + - Assets/AmbientCG/Textures/Wood007.jpg + - Assets/AmbientCG/Textures/Wood008.jpg + - Assets/AmbientCG/Textures/Wood009.jpg + - Assets/AmbientCG/Textures/Wood010.jpg + + # Leather - Different leather patterns (10 textures) + - Assets/AmbientCG/Textures/Leather001.jpg + - Assets/AmbientCG/Textures/Leather002.jpg + - Assets/AmbientCG/Textures/Leather003.jpg + - Assets/AmbientCG/Textures/Leather004.jpg + - Assets/AmbientCG/Textures/Leather005.jpg + - Assets/AmbientCG/Textures/Leather006.jpg + - Assets/AmbientCG/Textures/Leather007.jpg + - Assets/AmbientCG/Textures/Leather008.jpg + - Assets/AmbientCG/Textures/Leather009.jpg + - Assets/AmbientCG/Textures/Leather010.jpg + + # Tiles - Different tile patterns (20 textures) + - Assets/AmbientCG/Textures/Tiles001.jpg + - Assets/AmbientCG/Textures/Tiles002.jpg + - Assets/AmbientCG/Textures/Tiles003.jpg + - Assets/AmbientCG/Textures/Tiles004.jpg + - Assets/AmbientCG/Textures/Tiles005.jpg + - Assets/AmbientCG/Textures/Tiles006.jpg + - 
Assets/AmbientCG/Textures/Tiles007.jpg + - Assets/AmbientCG/Textures/Tiles008.jpg + - Assets/AmbientCG/Textures/Tiles009.jpg + - Assets/AmbientCG/Textures/Tiles010.jpg + - Assets/AmbientCG/Textures/Tiles015.jpg + - Assets/AmbientCG/Textures/Tiles020.jpg + - Assets/AmbientCG/Textures/Tiles025.jpg + - Assets/AmbientCG/Textures/Tiles030.jpg + - Assets/AmbientCG/Textures/Tiles035.jpg + - Assets/AmbientCG/Textures/Tiles040.jpg + - Assets/AmbientCG/Textures/Tiles045.jpg + - Assets/AmbientCG/Textures/Tiles050.jpg + - Assets/AmbientCG/Textures/Tiles055.jpg + - Assets/AmbientCG/Textures/Tiles060.jpg + + # Unusual/Fun - Holiday textures (1 texture) + - Assets/AmbientCG/Textures/ChristmasTreeOrnament001.jpg diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/rl_state_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/rl_state_cfg.py similarity index 75% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/rl_state_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/rl_state_cfg.py index 0818cab7..e9dbf863 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/rl_state_cfg.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/rl_state_cfg.py @@ -10,6 +10,7 @@ import isaaclab.sim as sim_utils from isaaclab.assets import AssetBaseCfg, RigidObjectCfg from isaaclab.envs import ManagerBasedRLEnvCfg, ViewerCfg +from isaaclab.managers import CurriculumTermCfg as CurrTerm from isaaclab.managers import EventTermCfg as EventTerm from isaaclab.managers import ObservationGroupCfg as ObsGroup from isaaclab.managers import ObservationTermCfg as ObsTerm @@ -21,14 +22,11 @@ from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR 
-from uwlab_assets.robots.ur5e_robotiq_gripper import ( - EXPLICIT_UR5E_ROBOTIQ_2F85, - IMPLICIT_UR5E_ROBOTIQ_2F85, - Ur5eRobotiq2f85RelativeJointPositionAction, -) +from uwlab_assets.robots.ur5e_robotiq_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85, IMPLICIT_UR5E_ROBOTIQ_2F85 -from uwlab_tasks.manager_based.manipulation.reset_states.config.ur5e_robotiq_2f85.actions import ( +from uwlab_tasks.manager_based.manipulation.omnireset.config.ur5e_robotiq_2f85.actions import ( Ur5eRobotiq2f85RelativeOSCAction, + Ur5eRobotiq2f85RelativeOSCEvalAction, ) from ... import mdp as task_mdp @@ -38,7 +36,7 @@ class RlStateSceneCfg(InteractiveSceneCfg): """Scene configuration for RL state environment.""" - robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") + robot = IMPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") insertive_object: RigidObjectCfg = RigidObjectCfg( prim_path="{ENV_REGEX_NS}/InsertiveObject", @@ -65,10 +63,8 @@ class RlStateSceneCfg(InteractiveSceneCfg): solver_position_iteration_count=4, solver_velocity_iteration_count=0, disable_gravity=False, - # receptive object does not move kinematic_enabled=True, ), - # since kinematic_enabled=True, mass does not matter mass_props=sim_utils.MassPropertiesCfg(mass=0.5), ), init_state=RigidObjectCfg.InitialStateCfg(pos=(0.0, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0)), @@ -102,7 +98,7 @@ class RlStateSceneCfg(InteractiveSceneCfg): sky_light = AssetBaseCfg( prim_path="/World/skyLight", spawn=sim_utils.DomeLightCfg( - intensity=10000.0, + intensity=1000.0, texture_file=f"{ISAAC_NUCLEUS_DIR}/Materials/Textures/Skies/PolyHaven/kloofendal_43d_clear_puresky_4k.hdr", ), ) @@ -110,7 +106,12 @@ class RlStateSceneCfg(InteractiveSceneCfg): @configclass class BaseEventCfg: - """Configuration for events.""" + """Shared events: material/mass randomization, gripper gains, scene reset. 
+ + Does NOT include arm sysid or OSC gain randomization -- those differ + between finetune (curriculum-ramped) and eval (fixed) stages. See + ``FinetuneEventCfg`` and ``FinetuneEvalEventCfg``. + """ # mode: startup (randomize dynamics) robot_material = EventTerm( @@ -126,7 +127,6 @@ class BaseEventCfg: }, ) - # use large friction to avoid slipping insertive_object_material = EventTerm( func=task_mdp.randomize_rigid_body_material, # type: ignore mode="startup", @@ -140,13 +140,12 @@ class BaseEventCfg: }, ) - # use large friction to avoid slipping receptive_object_material = EventTerm( func=task_mdp.randomize_rigid_body_material, # type: ignore mode="startup", params={ - "static_friction_range": (1.0, 2.0), - "dynamic_friction_range": (0.9, 1.9), + "static_friction_range": (0.2, 0.6), + "dynamic_friction_range": (0.15, 0.5), "restitution_range": (0.0, 0.0), "num_buckets": 256, "asset_cfg": SceneEntityCfg("receptive_object"), @@ -216,18 +215,6 @@ class BaseEventCfg: }, ) - randomize_robot_joint_parameters = EventTerm( - func=task_mdp.randomize_joint_parameters, - mode="reset", - params={ - "asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", "wrist.*", "finger_joint"]), - "friction_distribution_params": (0.25, 4.0), - "armature_distribution_params": (0.25, 4.0), - "operation": "scale", - "distribution": "log_uniform", - }, - ) - randomize_gripper_actuator_parameters = EventTerm( func=task_mdp.randomize_actuator_gains, mode="reset", @@ -246,17 +233,18 @@ class BaseEventCfg: @configclass class TrainEventCfg(BaseEventCfg): - """Configuration for training events.""" + """Training events: material/mass randomization + 4-path resets. 
No sysid or OSC gain randomization.""" reset_from_reset_states = EventTerm( func=task_mdp.MultiResetManager, mode="reset", params={ - "base_paths": [ - f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectAnywhereEEAnywhere", - f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectRestingEEGrasped", - f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectAnywhereEEGrasped", - f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectPartiallyAssembledEEGrasped", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", + "reset_types": [ + "ObjectAnywhereEEAnywhere", + "ObjectRestingEEGrasped", + "ObjectAnywhereEEGrasped", + "ObjectPartiallyAssembledEEGrasped", ], "probs": [0.25, 0.25, 0.25, 0.25], "success": "env.reward_manager.get_term_cfg('progress_context').func.success", @@ -265,22 +253,102 @@ class TrainEventCfg(BaseEventCfg): @configclass -class EvalEventCfg(BaseEventCfg): - """Configuration for evaluation events.""" +class TrainEvalEventCfg(BaseEventCfg): + """Eval after Stage 1: no sysid/OSC gain randomization, 1-path resets.""" reset_from_reset_states = EventTerm( func=task_mdp.MultiResetManager, mode="reset", params={ - "base_paths": [ - f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/Resets/ObjectPairs/ObjectAnywhereEEAnywhere", + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", + "reset_types": ["ObjectAnywhereEEAnywhere"], + "probs": [1.0], + "success": "env.reward_manager.get_term_cfg('progress_context').func.success", + }, + ) + + +@configclass +class FinetuneEvalEventCfg(BaseEventCfg): + """Eval after Stage 2: fixed sysid + OSC gains (scale_progress=1) + 1-path resets.""" + + randomize_arm_sysid = EventTerm( + func=task_mdp.randomize_arm_from_sysid_fixed, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot"), + "joint_names": [ + "shoulder_pan_joint", + "shoulder_lift_joint", + "elbow_joint", + "wrist_1_joint", + "wrist_2_joint", + "wrist_3_joint", ], + "actuator_name": "arm", + "scale_range": (0.8, 
1.2), + "delay_range": (0, 1), + }, + ) + + randomize_osc_gains = EventTerm( + func=task_mdp.randomize_rel_cartesian_osc_gains_fixed, + mode="reset", + params={ + "action_name": "arm", + "scale_range": (0.8, 1.2), + }, + ) + + reset_from_reset_states = EventTerm( + func=task_mdp.MultiResetManager, + mode="reset", + params={ + "dataset_dir": f"{UWLAB_CLOUD_ASSETS_DIR}/Datasets/OmniReset", + "reset_types": ["ObjectAnywhereEEAnywhere"], "probs": [1.0], "success": "env.reward_manager.get_term_cfg('progress_context').func.success", }, ) +@configclass +class FinetuneEventCfg(TrainEventCfg): + """Finetune events: curriculum-ramped sysid + OSC gains + 4-path resets. Explicit actuator from start.""" + + randomize_arm_sysid = EventTerm( + func=task_mdp.randomize_arm_from_sysid, + mode="reset", + params={ + "asset_cfg": SceneEntityCfg("robot"), + "joint_names": [ + "shoulder_pan_joint", + "shoulder_lift_joint", + "elbow_joint", + "wrist_1_joint", + "wrist_2_joint", + "wrist_3_joint", + ], + "actuator_name": "arm", + "scale_range": (0.8, 1.2), + "delay_range": (0, 1), + "initial_scale_progress": 0.0, + }, + ) + + randomize_osc_gains = EventTerm( + func=task_mdp.randomize_rel_cartesian_osc_gains, + mode="reset", + params={ + "action_name": "arm", + "scale_range": (0.8, 1.2), + "terminal_kp": (1000.0, 1000.0, 1000.0, 50.0, 50.0, 50.0), + "terminal_damping_ratio": (1.0, 1.0, 1.0, 1.0, 1.0, 1.0), + "initial_scale_progress": 0.0, + }, + ) + + @configclass class CommandsCfg: """Command specifications for the MDP.""" @@ -288,8 +356,6 @@ class CommandsCfg: task_command = task_mdp.TaskCommandCfg( asset_cfg=SceneEntityCfg("robot", body_names="body"), resampling_time_range=(1e6, 1e6), - success_position_threshold=0.005, - success_orientation_threshold=0.025, insertive_asset_cfg=SceneEntityCfg("insertive_object"), receptive_asset_cfg=SceneEntityCfg("receptive_object"), ) @@ -308,22 +374,19 @@ class PolicyCfg(ObsGroup): joint_pos = ObsTerm(func=task_mdp.joint_pos) end_effector_pose = 
ObsTerm( - func=task_mdp.target_asset_pose_in_root_asset_frame_with_metadata, + func=task_mdp.target_asset_pose_in_root_asset_frame, params={ - "target_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), + "target_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "root_asset_cfg": SceneEntityCfg("robot"), - "target_asset_offset_metadata_key": "gripper_offset", - "root_asset_offset_metadata_key": "offset", "rotation_repr": "axis_angle", }, ) insertive_asset_pose = ObsTerm( - func=task_mdp.target_asset_pose_in_root_asset_frame_with_metadata, + func=task_mdp.target_asset_pose_in_root_asset_frame, params={ "target_asset_cfg": SceneEntityCfg("insertive_object"), - "root_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), - "root_asset_offset_metadata_key": "gripper_offset", + "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "rotation_repr": "axis_angle", }, ) @@ -332,7 +395,7 @@ class PolicyCfg(ObsGroup): func=task_mdp.target_asset_pose_in_root_asset_frame, params={ "target_asset_cfg": SceneEntityCfg("receptive_object"), - "root_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), + "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "rotation_repr": "axis_angle", }, ) @@ -347,7 +410,7 @@ class PolicyCfg(ObsGroup): ) def __post_init__(self): - self.enable_corruption = False + self.enable_corruption = True self.concatenate_terms = True self.history_length = 5 @@ -360,22 +423,19 @@ class CriticCfg(ObsGroup): joint_pos = ObsTerm(func=task_mdp.joint_pos) end_effector_pose = ObsTerm( - func=task_mdp.target_asset_pose_in_root_asset_frame_with_metadata, + func=task_mdp.target_asset_pose_in_root_asset_frame, params={ - "target_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), + "target_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "root_asset_cfg": SceneEntityCfg("robot"), - "target_asset_offset_metadata_key": "gripper_offset", - 
"root_asset_offset_metadata_key": "offset", "rotation_repr": "axis_angle", }, ) insertive_asset_pose = ObsTerm( - func=task_mdp.target_asset_pose_in_root_asset_frame_with_metadata, + func=task_mdp.target_asset_pose_in_root_asset_frame, params={ "target_asset_cfg": SceneEntityCfg("insertive_object"), - "root_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), - "root_asset_offset_metadata_key": "gripper_offset", + "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "rotation_repr": "axis_angle", }, ) @@ -384,7 +444,7 @@ class CriticCfg(ObsGroup): func=task_mdp.target_asset_pose_in_root_asset_frame, params={ "target_asset_cfg": SceneEntityCfg("receptive_object"), - "root_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), + "root_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "rotation_repr": "axis_angle", }, ) @@ -406,7 +466,7 @@ class CriticCfg(ObsGroup): end_effector_vel_lin_ang_b = ObsTerm( func=task_mdp.asset_link_velocity_in_root_asset_frame, params={ - "target_asset_cfg": SceneEntityCfg("robot", body_names="robotiq_base_link"), + "target_asset_cfg": SceneEntityCfg("robot", body_names="wrist_3_link"), "root_asset_cfg": SceneEntityCfg("robot"), }, ) @@ -466,11 +526,11 @@ class RewardsCfg: action_magnitude = RewTerm(func=task_mdp.action_l2_clamped, weight=-1e-4) - action_rate = RewTerm(func=task_mdp.action_rate_l2_clamped, weight=-1e-4) + action_rate = RewTerm(func=task_mdp.action_rate_l2_clamped, weight=-1e-3) joint_vel = RewTerm( func=task_mdp.joint_vel_l2_clamped, - weight=-1e-3, + weight=-1e-2, params={"asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", "wrist.*"])}, ) @@ -512,6 +572,47 @@ class TerminationsCfg: abnormal_robot = DoneTerm(func=task_mdp.abnormal_robot_state) +@configclass +class FinetuneCurriculumsCfg: + """Finetune curriculum: ADR sysid + action scale ramp. 
No actuator swap (explicit from start).""" + + adr_sysid = CurrTerm( + func=task_mdp.adr_sysid_curriculum, + params={ + "event_term_names": ["randomize_arm_sysid", "randomize_osc_gains"], + "reset_event_name": "reset_from_reset_states", + "success_threshold_up": 0.95, + "success_threshold_down": 0.9, + "delta": 0.01, + "update_every_n_steps": 200, + "initial_scale_progress": 0.0, + "warmup_success_threshold": 0.95, + }, + ) + + action_scale = CurrTerm( + func=task_mdp.action_scale_curriculum, + params={ + "action_name": "arm", + "reset_event_name": "reset_from_reset_states", + "initial_scales": [0.02, 0.02, 0.02, 0.02, 0.02, 0.2], + "target_scales": [0.01, 0.01, 0.002, 0.02, 0.02, 0.2], + "success_threshold_up": 0.95, + "success_threshold_down": 0.9, + "delta": 0.01, + "update_every_n_steps": 200, + "initial_progress": 0.0, + }, + ) + + +@configclass +class NoCurriculumsCfg: + """No curriculum (eval / data-collection with fixed 0.8--1.2 randomization).""" + + pass + + def make_insertive_object(usd_path: str): return RigidObjectCfg( prim_path="{ENV_REGEX_NS}/InsertiveObject", @@ -581,6 +682,7 @@ class Ur5eRobotiq2f85RlStateCfg(ManagerBasedRLEnvCfg): actions: Ur5eRobotiq2f85RelativeOSCAction = Ur5eRobotiq2f85RelativeOSCAction() rewards: RewardsCfg = RewardsCfg() terminations: TerminationsCfg = TerminationsCfg() + curriculum: NoCurriculumsCfg = NoCurriculumsCfg() events: BaseEventCfg = MISSING commands: CommandsCfg = CommandsCfg() viewer: ViewerCfg = ViewerCfg(eye=(2.0, 0.0, 0.75), origin_type="world", env_index=0, asset_name="robot") @@ -613,99 +715,45 @@ def __post_init__(self): self.sim.render.enable_dl_denoiser = True -# Training configurations +# Training configuration (Stage 1: no curriculum, implicit actuator, no sysid DR) @configclass class Ur5eRobotiq2f85RelCartesianOSCTrainCfg(Ur5eRobotiq2f85RlStateCfg): - """Training configuration for Relative Cartesian OSC action space.""" events: TrainEventCfg = TrainEventCfg() actions: Ur5eRobotiq2f85RelativeOSCAction = 
Ur5eRobotiq2f85RelativeOSCAction() - def __post_init__(self): - super().__post_init__() - self.scene.robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") - - self.events.randomize_robot_actuator_parameters = EventTerm( - func=task_mdp.randomize_operational_space_controller_gains, - mode="reset", - params={ - "action_name": "arm", - "stiffness_distribution_params": (0.7, 1.3), - "damping_distribution_params": (0.9, 1.1), - "operation": "scale", - "distribution": "uniform", - }, - ) - +# Finetune configuration (Stage 2: explicit actuator, curriculum ramps sysid + gains + scales) @configclass -class Ur5eRobotiq2f85RelJointPosTrainCfg(Ur5eRobotiq2f85RlStateCfg): - """Training configuration for Relative Joint Position action space.""" +class Ur5eRobotiq2f85RelCartesianOSCFinetuneCfg(Ur5eRobotiq2f85RlStateCfg): + """Finetune config: loads converged Stage 1 policy, explicit actuator from start, curriculum ramps DR.""" - events: TrainEventCfg = TrainEventCfg() - actions: Ur5eRobotiq2f85RelativeJointPositionAction = Ur5eRobotiq2f85RelativeJointPositionAction() + events: FinetuneEventCfg = FinetuneEventCfg() + actions: Ur5eRobotiq2f85RelativeOSCAction = Ur5eRobotiq2f85RelativeOSCAction() + curriculum: FinetuneCurriculumsCfg = FinetuneCurriculumsCfg() def __post_init__(self): super().__post_init__() - self.scene.robot = IMPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") - - self.events.randomize_robot_actuator_parameters = EventTerm( - func=task_mdp.randomize_actuator_gains, - mode="reset", - params={ - "asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", "wrist.*", "finger_joint"]), - "stiffness_distribution_params": (0.5, 2.0), - "damping_distribution_params": (0.5, 2.0), - "operation": "scale", - "distribution": "log_uniform", - }, - ) + self.scene.robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") -# Evaluation configurations +# Evaluation configuration (after Stage 1: implicit 
actuator, soft gains, no sysid DR) @configclass class Ur5eRobotiq2f85RelCartesianOSCEvalCfg(Ur5eRobotiq2f85RlStateCfg): - """Evaluation configuration for Relative Cartesian OSC action space.""" + """Eval after Stage 1: implicit actuator, soft gains, large action scale, no sysid DR.""" - events: EvalEventCfg = EvalEventCfg() + events: TrainEvalEventCfg = TrainEvalEventCfg() actions: Ur5eRobotiq2f85RelativeOSCAction = Ur5eRobotiq2f85RelativeOSCAction() - def __post_init__(self): - super().__post_init__() - self.scene.robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") - - self.events.randomize_robot_actuator_parameters = EventTerm( - func=task_mdp.randomize_operational_space_controller_gains, - mode="reset", - params={ - "action_name": "arm", - "stiffness_distribution_params": (0.7, 1.3), - "damping_distribution_params": (0.9, 1.1), - "operation": "scale", - "distribution": "uniform", - }, - ) - +# Evaluation configuration (after Stage 2: explicit actuator, stiff gains, fixed sysid) @configclass -class Ur5eRobotiq2f85RelJointPosEvalCfg(Ur5eRobotiq2f85RlStateCfg): - """Evaluation configuration for Relative Joint Position action space.""" +class Ur5eRobotiq2f85RelCartesianOSCFinetuneEvalCfg(Ur5eRobotiq2f85RlStateCfg): + """Eval after Stage 2: explicit actuator, stiff gains, small action scale, fixed sysid + OSC gains.""" - events: EvalEventCfg = EvalEventCfg() - actions: Ur5eRobotiq2f85RelativeJointPositionAction = Ur5eRobotiq2f85RelativeJointPositionAction() + events: FinetuneEvalEventCfg = FinetuneEvalEventCfg() + actions: Ur5eRobotiq2f85RelativeOSCEvalAction = Ur5eRobotiq2f85RelativeOSCEvalAction() def __post_init__(self): super().__post_init__() - self.scene.robot = IMPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") - - self.events.randomize_robot_actuator_parameters = EventTerm( - func=task_mdp.randomize_actuator_gains, - mode="reset", - params={ - "asset_cfg": SceneEntityCfg("robot", joint_names=["shoulder.*", "elbow.*", 
"wrist.*", "finger_joint"]), - "stiffness_distribution_params": (0.5, 2.0), - "damping_distribution_params": (0.5, 2.0), - "operation": "scale", - "distribution": "log_uniform", - }, - ) + self.scene.robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/sysid_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/sysid_cfg.py new file mode 100644 index 00000000..64c68c68 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/config/ur5e_robotiq_2f85/sysid_cfg.py @@ -0,0 +1,91 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +# Copyright (c) 2024-2025, The UW Lab Project Developers. +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""Scene and manager-based env config for UR5e system identification (CMA-ES). + +Reuses the same robot and RelCartesianOSCAction as RL. Sysid scripts use the +registered gym env so the in-env OSC is used (no duplicate controller). +""" + +from __future__ import annotations + +import isaaclab.sim as sim_utils +from isaaclab.assets import AssetBaseCfg +from isaaclab.envs import ManagerBasedRLEnvCfg +from isaaclab.managers import ObservationGroupCfg as ObsGroup +from isaaclab.managers import ObservationTermCfg as ObsTerm +from isaaclab.managers import TerminationTermCfg as DoneTerm +from isaaclab.scene import InteractiveSceneCfg +from isaaclab.utils import configclass + +from uwlab_assets.robots.ur5e_robotiq_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85 + +from ... 
import mdp as task_mdp +from .actions import Ur5eRobotiq2f85SysidOSCAction + +# Default simulation timestep for sysid (500 Hz, matches real robot control rate) +SYSID_SIM_DT = 1.0 / 500.0 + + +@configclass +class SysidSceneCfg(InteractiveSceneCfg): + """Scene for system identification: robot + ground + light, no objects.""" + + robot = EXPLICIT_UR5E_ROBOTIQ_2F85.replace(prim_path="{ENV_REGEX_NS}/Robot") + + ground = AssetBaseCfg( + prim_path="/World/defaultGroundPlane", + spawn=sim_utils.GroundPlaneCfg(), + ) + + dome_light = AssetBaseCfg( + prim_path="/World/Light", + spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75)), + ) + + +# Minimal MDP for sysid env (same action as RL; obs/rew/term minimal so env runs) +@configclass +class SysidObservationsCfg: + @configclass + class PolicyCfg(ObsGroup): + joint_pos = ObsTerm(func=task_mdp.joint_pos) + + def __post_init__(self): + self.enable_corruption = False + self.concatenate_terms = True + + policy: PolicyCfg = PolicyCfg() + + +@configclass +class SysidRewardsCfg: + pass + + +@configclass +class SysidTerminationsCfg: + time_out = DoneTerm(func=task_mdp.time_out, time_out=True) + + +@configclass +class SysidEnvCfg(ManagerBasedRLEnvCfg): + """Manager-based env for sysid: same scene + RelCartesianOSC as RL, decimation=1.""" + + scene: SysidSceneCfg = SysidSceneCfg(num_envs=512, env_spacing=2.0) + actions: Ur5eRobotiq2f85SysidOSCAction = Ur5eRobotiq2f85SysidOSCAction() + observations: SysidObservationsCfg = SysidObservationsCfg() + rewards: SysidRewardsCfg = SysidRewardsCfg() + terminations: SysidTerminationsCfg = SysidTerminationsCfg() + + def __post_init__(self) -> None: + self.decimation = 1 + self.episode_length_s = 99999.0 + self.sim.dt = SYSID_SIM_DT diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/__init__.py similarity index 100% rename from 
source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/__init__.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/__init__.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/__init__.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/actions_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/actions_cfg.py new file mode 100644 index 00000000..5a1de1c8 --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/actions_cfg.py @@ -0,0 +1,59 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +from dataclasses import MISSING + +from isaaclab.managers.action_manager import ActionTerm +from isaaclab.managers.manager_term_cfg import ActionTermCfg +from isaaclab.utils import configclass + +from . import task_space_actions + + +@configclass +class RelCartesianOSCActionCfg(ActionTermCfg): + """Configuration for Relative Cartesian OSC action term. + + Uses the analytical Jacobian from calibrated UR5e kinematics and a simple + task-space PD controller matching the real robot's OSC implementation: + tau = J^T @ (Kp * pose_error + Kd * vel_error) + + No inertial dynamics decoupling, no mass matrix. Designed to work with + the DelayedDCMotor actuator for sim2real alignment. 
+ """ + + class_type: type[ActionTerm] = task_space_actions.RelCartesianOSCAction + + @configclass + class OffsetCfg: + """Offset configuration for body or frame offsets.""" + + pos: tuple[float, float, float] = (0.0, 0.0, 0.0) + """Translation offset.""" + rot: tuple[float, float, float, float] = (1.0, 0.0, 0.0, 0.0) + """Rotation offset as quaternion (w, x, y, z).""" + + joint_names: list[str] = MISSING + """Joint names for the arm (regex supported).""" + + body_name: str = MISSING + """End-effector body name (e.g., 'wrist_3_link').""" + + scale_xyz_axisangle: tuple[float, float, float, float, float, float] = MISSING + """Per-DOF scaling for [x, y, z, rx, ry, rz] action deltas.""" + + input_clip: tuple[float, float] | None = None + """Optional symmetric clip range for scaled actions.""" + + motion_stiffness: tuple[float, float, float, float, float, float] = (200.0, 200.0, 200.0, 3.0, 3.0, 3.0) + """Task-space stiffness Kp for [x, y, z, rx, ry, rz].""" + + motion_damping_ratio: tuple[float, float, float, float, float, float] = (3.0, 3.0, 3.0, 1.0, 1.0, 1.0) + """Task-space damping ratio. Kd = 2 * sqrt(Kp) * damping_ratio.""" + + torque_limit: tuple[float, float, float, float, float, float] = (150.0, 150.0, 150.0, 28.0, 28.0, 28.0) + """Per-joint torque limits (clamped after J^T multiplication).""" diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/task_space_actions.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/task_space_actions.py new file mode 100644 index 00000000..857610bb --- /dev/null +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/actions/task_space_actions.py @@ -0,0 +1,191 @@ +# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). +# All Rights Reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +from __future__ import annotations + +import torch +from collections.abc import Sequence +from typing import TYPE_CHECKING + +import isaaclab.utils.math as math_utils +from isaaclab.assets import Articulation +from isaaclab.managers.action_manager import ActionTerm + +from uwlab_assets.robots.ur5e_robotiq_gripper.kinematics import compute_jacobian_analytical + +from . import actions_cfg + +if TYPE_CHECKING: + from isaaclab.envs import ManagerBasedEnv + + +class RelCartesianOSCAction(ActionTerm): + """Relative Cartesian OSC action term using analytical Jacobian and PD control. + + Matches the real robot's OSC implementation using calibrated analytical kinematics: + tau = J^T @ (Kp * pose_error + Kd * vel_error) + + No inertial dynamics decoupling. Velocity is computed from J @ dq for consistency + with the analytical Jacobian. Designed to work with DelayedDCMotor actuator. + + The flow per policy step: + 1. process_actions: scale raw 6-DOF delta, compute desired EE pose + 2. apply_actions (every physics step): compute current state, analytical J, + PD torques, clamp, and apply as joint effort targets + + Frame convention: both EE pose and analytical Jacobian are in the robot's + base_link frame (REP-103), consistent with the calibrated USD model. 
+ """ + + cfg: actions_cfg.RelCartesianOSCActionCfg + """The configuration of the action term.""" + _asset: Articulation + """The articulation asset on which the action term is applied.""" + + def __init__(self, cfg: actions_cfg.RelCartesianOSCActionCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + + # Resolve joints + self._joint_ids, self._joint_names = self._asset.find_joints(self.cfg.joint_names) + self._num_dof = len(self._joint_ids) + # Avoid slice-vs-list indexing overhead when all joints match + if self._num_dof == self._asset.num_joints: + self._joint_ids = slice(None) + + # Resolve EE body + body_ids, body_names = self._asset.find_bodies(self.cfg.body_name) + if len(body_ids) != 1: + raise ValueError( + f"Expected one match for body_name '{self.cfg.body_name}', got {len(body_ids)}: {body_names}" + ) + self._ee_body_idx = body_ids[0] + + # Controller gains (per-env for domain randomization): Kd = 2 * sqrt(Kp) * damping_ratio + kp = torch.tensor(cfg.motion_stiffness, device=self.device, dtype=torch.float32) + damping_ratio = torch.tensor(cfg.motion_damping_ratio, device=self.device, dtype=torch.float32) + kd = 2.0 * torch.sqrt(kp) * damping_ratio + # Store defaults (1D) and expand to per-env (N, 6) + self._kp_default = kp + self._kd_default = kd + self._damping_ratio_default = damping_ratio + self._kp = kp.unsqueeze(0).expand(self.num_envs, -1).clone() + self._kd = kd.unsqueeze(0).expand(self.num_envs, -1).clone() + self._torque_max = torch.tensor(cfg.torque_limit, device=self.device, dtype=torch.float32) + + # Action scaling + self._scale = torch.tensor(cfg.scale_xyz_axisangle, device=self.device, dtype=torch.float32) + if cfg.input_clip is not None: + self._input_clip = torch.tensor(cfg.input_clip, device=self.device, dtype=torch.float32) + else: + self._input_clip = None + + # Buffers + self._raw_actions = torch.zeros(self.num_envs, 6, device=self.device) + self._processed_actions = torch.zeros(self.num_envs, 6, device=self.device) + 
self._ee_pos_des = torch.zeros(self.num_envs, 3, device=self.device) + self._ee_quat_des = torch.zeros(self.num_envs, 4, device=self.device) + + # ------------------------------------------------------------------ + # Properties + # ------------------------------------------------------------------ + + @property + def action_dim(self) -> int: + return 6 + + @property + def raw_actions(self) -> torch.Tensor: + return self._raw_actions + + @property + def processed_actions(self) -> torch.Tensor: + return self._processed_actions + + # ------------------------------------------------------------------ + # Operations + # ------------------------------------------------------------------ + + def process_actions(self, actions: torch.Tensor): + """Scale raw 6-DOF deltas and compute desired EE pose for the PD tracker. + + Called once per policy step. The desired pose is held fixed while + apply_actions recomputes torques at each physics step. + """ + self._raw_actions[:] = actions + scaled = actions * self._scale + if self._input_clip is not None: + scaled = torch.clamp(scaled, min=self._input_clip[0], max=self._input_clip[1]) + self._processed_actions[:] = scaled + + # Current EE pose in root (base_link) frame + ee_pos_b, ee_quat_b = self._get_ee_pose_root_frame() + + # Desired position = current + delta + self._ee_pos_des[:] = ee_pos_b + scaled[:, :3] + + # Desired orientation: axis-angle delta -> quaternion -> compose + delta_rot = scaled[:, 3:6] + angle = torch.norm(delta_rot, dim=-1, keepdim=True) + safe_angle = torch.where(angle > 1e-6, angle, torch.ones_like(angle)) + axis = delta_rot / safe_angle + axis = torch.where(angle > 1e-6, axis, torch.zeros_like(axis)) + half = angle / 2.0 + delta_quat = torch.cat([torch.cos(half), axis * torch.sin(half)], dim=-1) + self._ee_quat_des[:] = math_utils.quat_mul(delta_quat, ee_quat_b) + + def apply_actions(self): + """Compute PD torques using analytical Jacobian and apply as joint efforts. 
+ + Called every physics step (decimation times per policy step). + """ + # Current state + ee_pos_b, ee_quat_b = self._get_ee_pose_root_frame() + joint_pos = self._asset.data.joint_pos[:, self._joint_ids] + joint_vel = self._asset.data.joint_vel[:, self._joint_ids] + + # Analytical Jacobian (base_link frame, matching EE pose frame) + jacobian = compute_jacobian_analytical(joint_pos, device=str(self.device)) + + # EE velocity from J @ dq (consistent with analytical Jacobian) + ee_vel = torch.bmm(jacobian, joint_vel.unsqueeze(-1)).squeeze(-1) # (N, 6) + + # Pose error + pos_error = self._ee_pos_des - ee_pos_b + quat_error = math_utils.quat_mul(self._ee_quat_des, math_utils.quat_inv(ee_quat_b)) + axis_angle_error = math_utils.axis_angle_from_quat(quat_error) + pose_error = torch.cat([pos_error, axis_angle_error], dim=-1) # (N, 6) + + # PD control: tau = J^T @ (Kp * err + Kd * (-vel)) + vel_error = -ee_vel + task_force = self._kp * pose_error + self._kd * vel_error + joint_torques = torch.bmm(jacobian.transpose(-1, -2), task_force.unsqueeze(-1)).squeeze(-1) + joint_torques = torch.clamp(joint_torques, -self._torque_max, self._torque_max) + + self._asset.set_joint_effort_target(joint_torques, joint_ids=self._joint_ids) + + def reset(self, env_ids: Sequence[int] | None = None) -> None: + """Reset targets to current EE pose to avoid transients.""" + if env_ids is None: + env_ids = slice(None) + self._raw_actions[env_ids] = 0.0 + ee_pos_b, ee_quat_b = self._get_ee_pose_root_frame() + self._ee_pos_des[env_ids] = ee_pos_b[env_ids] + self._ee_quat_des[env_ids] = ee_quat_b[env_ids] + + # ------------------------------------------------------------------ + # Helpers + # ------------------------------------------------------------------ + + def _get_ee_pose_root_frame(self) -> tuple[torch.Tensor, torch.Tensor]: + """Get EE pose in root (base_link) frame from sim state.""" + ee_pos_w = self._asset.data.body_pos_w[:, self._ee_body_idx] + ee_quat_w = 
self._asset.data.body_quat_w[:, self._ee_body_idx] + ee_pos_b, ee_quat_b = math_utils.subtract_frame_transforms( + self._asset.data.root_pos_w, + self._asset.data.root_quat_w, + ee_pos_w, + ee_quat_w, + ) + return ee_pos_b, ee_quat_b diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/collision_analyzer.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/collision_analyzer.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/collision_analyzer.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/collision_analyzer.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/collision_analyzer_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/collision_analyzer_cfg.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/collision_analyzer_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/collision_analyzer_cfg.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands.py similarity index 97% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands.py index 29d1d408..27e66492 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands.py @@ -97,8 +97,8 @@ def __init__(self, cfg: TaskCommandCfg, env: ManagerBasedEnv): pos=tuple(receptive_meta.get("assembled_offset").get("pos")), quat=tuple(receptive_meta.get("assembled_offset").get("quat")), ) - self.success_position_threshold: float = 
cfg.success_position_threshold - self.success_orientation_threshold: float = cfg.success_orientation_threshold + self.success_position_threshold: float = receptive_meta.get("success_thresholds").get("position") + self.success_orientation_threshold: float = receptive_meta.get("success_thresholds").get("orientation") self.metrics["average_rot_align_error"] = torch.zeros(self.num_envs, device=self.device) self.metrics["average_pos_align_error"] = torch.zeros(self.num_envs, device=self.device) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands_cfg.py similarity index 89% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands_cfg.py index 5c2b6490..086049e8 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/commands_cfg.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/commands_cfg.py @@ -26,10 +26,6 @@ class TaskCommandCfg(TaskDependentCommandCfg): asset_cfg: SceneEntityCfg = SceneEntityCfg("robot") - success_position_threshold: float = MISSING - - success_orientation_threshold: float = MISSING - insertive_asset_cfg: SceneEntityCfg = MISSING receptive_asset_cfg: SceneEntityCfg = MISSING diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/events.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/events.py similarity index 51% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/events.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/events.py index edeeb79d..0769039c 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/events.py +++ 
b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/events.py @@ -5,13 +5,15 @@ """Event functions for manipulation tasks.""" +import logging import numpy as np import os +import random import scipy.stats as stats -import tempfile import torch import trimesh import trimesh.transformations as tra +from collections.abc import Sequence import carb import isaaclab.sim as sim_utils @@ -24,12 +26,11 @@ from isaaclab.managers import EventTermCfg, ManagerTermBase, SceneEntityCfg from isaaclab.markers import VisualizationMarkers from isaaclab.markers.config import FRAME_MARKER_CFG -from isaaclab.utils.assets import retrieve_file_path -from pxr import UsdGeom +from pxr import Gf, UsdGeom, UsdLux from uwlab.envs.mdp.actions.actions_cfg import DifferentialInverseKinematicsActionCfg -from uwlab_tasks.manager_based.manipulation.reset_states.mdp import utils +from uwlab_tasks.manager_based.manipulation.omnireset.mdp import utils from ..assembly_keypoints import Offset from .success_monitor_cfg import SuccessMonitorCfg @@ -533,7 +534,9 @@ def __call__( # set the forces and torques into the buffers # note: these are only applied when you call: `asset.write_data_to_sim()` - asset.set_external_force_and_torque(forces, torques, env_ids=env_ids, body_ids=asset_cfg.body_ids) + asset.permanent_wrench_composer.set_forces_and_torques( + forces, torques, env_ids=env_ids, body_ids=asset_cfg.body_ids + ) class reset_end_effector_round_fixed_asset(ManagerTermBase): @@ -604,7 +607,7 @@ class reset_end_effector_from_grasp_dataset(ManagerTermBase): """Reset end effector pose using saved grasp dataset from grasp sampling.""" def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): - self.base_path: str = cfg.params.get("base_path") + self.dataset_dir: str = cfg.params.get("dataset_dir") self.fixed_asset_cfg: SceneEntityCfg = cfg.params.get("fixed_asset_cfg") # type: ignore robot_ik_cfg: SceneEntityCfg = cfg.params.get("robot_ik_cfg", SceneEntityCfg("robot")) gripper_cfg: 
SceneEntityCfg = cfg.params.get( @@ -637,22 +640,20 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): self.gripper_joint_ids: list[int] | slice = gripper_cfg.joint_ids self.gripper_joint_names: list[str] = gripper_cfg.joint_names if gripper_cfg.joint_names else [] - # Compute grasp dataset path using object hash + # Compute grasp dataset path from object name self.grasp_dataset_path = self._compute_grasp_dataset_path() # Load and pre-compute grasp data for fast sampling self._load_and_precompute_grasps(env) def _compute_grasp_dataset_path(self) -> str: - """Compute grasp dataset path using hash of the fixed asset (insertive object).""" usd_path = self.fixed_asset.cfg.spawn.usd_path - object_hash = utils.compute_assembly_hash(usd_path) - return f"{self.base_path}/{object_hash}.pt" + obj_name = utils.object_name_from_usd(usd_path) + return f"{self.dataset_dir}/Grasps/{obj_name}/grasps.pt" def _load_and_precompute_grasps(self, env): """Load Torch (.pt) grasp data and convert to optimized tensors.""" - # Handle URL or local path - local_path = retrieve_file_path(self.grasp_dataset_path) + local_path = utils.safe_retrieve_file_path(self.grasp_dataset_path) data = torch.load(local_path, map_location="cpu") # TorchDatasetFileHandler stores nested dicts; grasp data likely under 'grasp_relative_pose' @@ -710,7 +711,7 @@ def __call__( self, env: ManagerBasedEnv, env_ids: torch.Tensor, - base_path: str, + dataset_dir: str, fixed_asset_cfg: SceneEntityCfg, robot_ik_cfg: SceneEntityCfg, gripper_cfg: SceneEntityCfg, @@ -785,7 +786,7 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): super().__init__(cfg, env) # Extract parameters from config - self.base_path: str = cfg.params.get("base_path") + self.dataset_dir: str = cfg.params.get("dataset_dir") self.receptive_object_cfg: SceneEntityCfg = cfg.params.get("receptive_object_cfg") self.receptive_object: RigidObject = env.scene[self.receptive_object_cfg.name] self.insertive_object_cfg: SceneEntityCfg = 
cfg.params.get("insertive_object_cfg") @@ -796,22 +797,21 @@ def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): range_list = [pose_range_b.get(key, (0.0, 0.0)) for key in ["x", "y", "z", "roll", "pitch", "yaw"]] self.ranges = torch.tensor(range_list, device=env.device) - # Compute partial assembly dataset path using object hash + # Compute partial assembly dataset path from object pair names self.partial_assembly_dataset_path = self._compute_partial_assembly_dataset_path() # Load and pre-compute partial assembly data for fast sampling self._load_and_precompute_partial_assemblies(env) def _compute_partial_assembly_dataset_path(self) -> str: - """Compute partial assembly dataset path using hash of insertive and receptive objects.""" insertive_usd_path = self.insertive_object.cfg.spawn.usd_path receptive_usd_path = self.receptive_object.cfg.spawn.usd_path - object_hash = utils.compute_assembly_hash(insertive_usd_path, receptive_usd_path) - return f"{self.base_path}/{object_hash}.pt" + pair = utils.compute_pair_dir(insertive_usd_path, receptive_usd_path) + return f"{self.dataset_dir}/Resets/{pair}/partial_assemblies.pt" def _load_and_precompute_partial_assemblies(self, env): """Load Torch (.pt) partial assembly data and convert to optimized tensors.""" - local_path = retrieve_file_path(self.partial_assembly_dataset_path) + local_path = utils.safe_retrieve_file_path(self.partial_assembly_dataset_path) data = torch.load(local_path, map_location="cpu") rel_pos = data.get("relative_position") @@ -838,7 +838,7 @@ def __call__( self, env: ManagerBasedEnv, env_ids: torch.Tensor, - base_path: str, + dataset_dir: str, insertive_object_cfg: SceneEntityCfg, receptive_object_cfg: SceneEntityCfg, pose_range_b: dict[str, tuple[float, float]] = dict(), @@ -996,31 +996,30 @@ class MultiResetManager(ManagerTermBase): def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): super().__init__(cfg, env) - base_paths: list[str] = cfg.params.get("base_paths", []) + dataset_dir: 
str = cfg.params.get("dataset_dir", "") + reset_types: list[str] = cfg.params.get("reset_types", []) probabilities: list[float] = cfg.params.get("probs", []) - if not base_paths: - raise ValueError("No base paths provided") - if len(base_paths) != len(probabilities): - raise ValueError("Number of base paths must match number of probabilities") + if not reset_types: + raise ValueError("No reset_types provided") + if len(reset_types) != len(probabilities): + raise ValueError("Number of reset_types must match number of probabilities") - # Compute dataset paths using object hash + # Derive pair directory from scene objects insertive_usd_path = env.scene["insertive_object"].cfg.spawn.usd_path receptive_usd_path = env.scene["receptive_object"].cfg.spawn.usd_path - reset_state_hash = utils.compute_assembly_hash(insertive_usd_path, receptive_usd_path) + pair = utils.compute_pair_dir(insertive_usd_path, receptive_usd_path) - # Generate dataset paths using provided base paths + # Generate dataset paths from pair directory and reset types dataset_files = [] - for base_path in base_paths: - dataset_files.append(f"{base_path}/{reset_state_hash}.pt") + for rt in reset_types: + dataset_files.append(f"{dataset_dir}/Resets/{pair}/resets_{rt}.pt") # Load all datasets self.datasets = [] num_states = [] - download_dir = utils.get_temp_dir() for dataset_file in dataset_files: - # Handle both local files and URLs - local_file_path = retrieve_file_path(dataset_file, download_dir=download_dir) + local_file_path = utils.safe_retrieve_file_path(dataset_file) # Check if local file exists (after potential download) if not os.path.exists(local_file_path): @@ -1049,7 +1048,8 @@ def __call__( self, env: ManagerBasedEnv, env_ids: torch.Tensor, - base_paths: list[str], + dataset_dir: str, + reset_types: list[str], probs: list[float], success: str | None = None, ) -> None: @@ -1072,6 +1072,10 @@ def __call__( f"Metrics/task_{task_idx}_normalized_prob": self.probs[task_idx].item(), }) + # Log 
episode length at reset + ep_lengths = self._env.episode_length_buf[env_ids].float() + self._env.extras["log"]["Metrics/mean_episode_length"] = ep_lengths.mean().item() + # Sample which dataset to use for each environment dataset_indices = torch.multinomial(self.probs, len(env_ids), replacement=True) self.task_id[env_ids] = dataset_indices @@ -1087,12 +1091,81 @@ def __call__( 0, self.num_states[dataset_idx], (len(current_env_ids),), device=self._env.device ) states_to_reset_from = sample_from_nested_dict(self.datasets[dataset_idx], state_indices) - self._env.scene.reset_to(states_to_reset_from["initial_state"], env_ids=current_env_ids, is_relative=True) + self._reset_to(states_to_reset_from["initial_state"], env_ids=current_env_ids, is_relative=True) # Reset velocities robot: Articulation = self._env.scene["robot"] robot.set_joint_velocity_target(torch.zeros_like(robot.data.joint_vel[env_ids]), env_ids=env_ids) + def _reset_to( + self, + state: dict[str, dict[str, dict[str, torch.Tensor]]], + env_ids: Sequence[int] | None = None, + is_relative: bool = False, + ): + """Resets the entities in the scene to the provided state. + + Args: + state: The state to reset the scene entities to. Please refer to :meth:`get_state` for the format. + env_ids: The indices of the environments to reset. Defaults to None, in which case + all environment instances are reset. + is_relative: If set to True, the state is considered relative to the environment origins. + Defaults to False. 
+ """ + # resolve env_ids + if env_ids is None: + env_ids = self._env.scene._ALL_INDICES + # articulations + for asset_name, articulation in self._env.scene._articulations.items(): + if asset_name not in state["articulation"]: + continue + asset_state = state["articulation"][asset_name] + # root state + root_pose = asset_state["root_pose"].clone() + if is_relative: + root_pose[:, :3] += self._env.scene.env_origins[env_ids] + root_velocity = asset_state["root_velocity"].clone() + articulation.write_root_pose_to_sim(root_pose, env_ids=env_ids) + articulation.write_root_velocity_to_sim(root_velocity, env_ids=env_ids) + # joint state + joint_position = asset_state["joint_position"].clone() + joint_velocity = asset_state["joint_velocity"].clone() + articulation.write_joint_state_to_sim(joint_position, joint_velocity, env_ids=env_ids) + # FIXME: This is not generic as it assumes PD control over the joints. + # This assumption does not hold for effort controlled joints. + articulation.set_joint_position_target(joint_position, env_ids=env_ids) + articulation.set_joint_velocity_target(joint_velocity, env_ids=env_ids) + # deformable objects + for asset_name, deformable_object in self._env.scene._deformable_objects.items(): + if asset_name not in state["deformable_object"]: + continue + asset_state = state["deformable_object"][asset_name] + nodal_position = asset_state["nodal_position"].clone() + if is_relative: + nodal_position[:, :3] += self._env.scene.env_origins[env_ids] + nodal_velocity = asset_state["nodal_velocity"].clone() + deformable_object.write_nodal_pos_to_sim(nodal_position, env_ids=env_ids) + deformable_object.write_nodal_velocity_to_sim(nodal_velocity, env_ids=env_ids) + # rigid objects + for asset_name, rigid_object in self._env.scene._rigid_objects.items(): + if asset_name not in state["rigid_object"]: + continue + asset_state = state["rigid_object"][asset_name] + root_pose = asset_state["root_pose"].clone() + if is_relative: + root_pose[:, :3] += 
self._env.scene.env_origins[env_ids] + root_velocity = asset_state["root_velocity"].clone() + rigid_object.write_root_pose_to_sim(root_pose, env_ids=env_ids) + rigid_object.write_root_velocity_to_sim(root_velocity, env_ids=env_ids) + # surface grippers + for asset_name, surface_gripper in self._env.scene._surface_grippers.items(): + asset_state = state["gripper"][asset_name] + surface_gripper.set_grippers_command(asset_state) + + # write data to simulation to make sure initial state is set + # this propagates the joint targets to the simulation + self._env.scene.write_data_to_sim() + def sample_state_data_set(episode_data: dict, idx: torch.Tensor, device: torch.device) -> dict: """Sample state from episode data and move tensors to device in one pass.""" @@ -1231,133 +1304,1086 @@ def __call__( asset.write_root_velocity_to_sim(velocities, env_ids=env_ids) -def randomize_operational_space_controller_gains( - env: ManagerBasedEnv, +class randomize_hdri(ManagerTermBase): + """Randomizes the HDRI texture, intensity, and rotation. + + HDRI paths are loaded from a YAML config file once during initialization. + Paths under 'isaac_nucleus' section are prefixed with ISAAC_NUCLEUS_DIR, + all other paths are prefixed with NVIDIA_NUCLEUS_DIR. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the randomization term and cache HDRI paths.""" + super().__init__(cfg, env) + + hdri_config_path = cfg.params.get("hdri_config_path") + + # Load and cache HDRI paths once during init + if hdri_config_path is not None: + self.hdri_paths = utils.load_asset_paths_from_config( + hdri_config_path, cache_subdir="hdris", skip_validation=False + ) + logging.info(f"[randomize_hdri] Loaded {len(self.hdri_paths)} HDRI paths.") + else: + self.hdri_paths = [] + + if not self.hdri_paths: + raise RuntimeError(f"[randomize_hdri] No HDRI paths loaded. 
Check hdri_config_path={hdri_config_path}") + non_local = [p for p in self.hdri_paths if not p.startswith("/")] + if non_local: + raise RuntimeError( + f"[randomize_hdri] {len(non_local)} HDRI paths are non-local (Nucleus) " + "and will silently fail if Nucleus is unreachable. " + f"First 3: {non_local[:3]}. " + "Use only local/cloud-cached HDRIs." + ) + missing = [p for p in self.hdri_paths if not os.path.exists(p)] + if missing: + raise RuntimeError( + f"[randomize_hdri] {len(missing)}/{len(self.hdri_paths)} HDRI files missing on disk. " + f"First 3: {missing[:3]}" + ) + + # Apply initial randomization so envs don't start with default lighting + self(env, torch.arange(env.num_envs, device=env.device), **cfg.params) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + light_path: str = "/World/skyLight", + hdri_config_path: str | None = None, + intensity_range: tuple = (500.0, 1000.0), + rotation_range: tuple = (0.0, 360.0), + ) -> None: + stage = omni.usd.get_context().get_stage() + light_prim = stage.GetPrimAtPath(light_path) + if not light_prim.IsValid(): + raise RuntimeError( + f"[randomize_hdri] Light prim at '{light_path}' does not exist on the stage. " + "This is likely because the DomeLightCfg failed to spawn (e.g. Nucleus server unreachable). " + "Remove the texture_file from DomeLightCfg or use a local path." + ) + + dome_light = UsdLux.DomeLight(light_prim) + if not dome_light: + raise RuntimeError(f"[randomize_hdri] Prim at '{light_path}' is not a DomeLight.") + + random_hdri = random.choice(self.hdri_paths) + intensity = random.randint(int(intensity_range[0]), int(intensity_range[1])) + + # Use direct attribute access (DEXTRAH-style) -- UsdLux helper methods + # can map to the wrong schema attribute name depending on USD version. 
+ light_prim.GetAttribute("inputs:texture:file").Set(random_hdri) + light_prim.GetAttribute("inputs:intensity").Set(float(intensity)) + + from scipy.spatial.transform import Rotation as R + + quat = R.random().as_quat() # [x, y, z, w] scipy convention + xformable = UsdGeom.Xformable(light_prim) + xformable.ClearXformOpOrder() + xformable.AddOrientOp(precision=UsdGeom.XformOp.PrecisionDouble).Set( + Gf.Quatd(float(quat[3]), Gf.Vec3d(float(quat[0]), float(quat[1]), float(quat[2]))) + ) + + logging.debug(f"[randomize_hdri] Applied: {random_hdri}, intensity={intensity}") + + +def randomize_tiled_cameras( + env, env_ids: torch.Tensor, - action_name: str, - stiffness_distribution_params: tuple[float, float], - damping_distribution_params: tuple[float, float], - operation: str = "scale", - distribution: str = "log_uniform", + camera_path_template: str, + base_position: tuple, + base_rotation: tuple, + position_deltas: dict, + euler_deltas: dict, ) -> None: - """Randomize operational space controller motion stiffness and damping gains. + """Randomizes tiled cameras with XYZ and Euler angle deltas from base values.""" + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device="cpu") + else: + env_ids = env_ids.cpu() - This function randomizes the motion_stiffness_task and motion_damping_ratio_task parameters - of an operational space controller. The first three terms (xyz) and last three terms (ypr) - are randomized together to maintain consistency within translational and rotational components. + for env_idx in env_ids: + env_idx_value = env_idx.item() if hasattr(env_idx, "item") else env_idx - Args: - env: The environment instance. - env_ids: The environment indices to randomize. If None, all environments are randomized. - action_name: The name of the action term to randomize. - stiffness_distribution_params: The distribution parameters for stiffness (min, max). - damping_distribution_params: The distribution parameters for damping ratio (min, max). 
- operation: The operation to perform on the gains. Currently supports "scale" and "add". - distribution: The distribution to sample from. Currently supports "log_uniform". - - Raises: - ValueError: If the action is not found or is not an operational space controller action. - ValueError: If an unsupported distribution is specified. - """ - if env_ids is None: - env_ids = torch.arange(env.scene.num_envs, device=env.device) + # Get the camera path for this environment using the template + camera_path = camera_path_template.format(env_idx_value) - # Get the action term - action_term = env.action_manager._terms.get(action_name) - if action_term is None: - raise ValueError(f"Action term '{action_name}' not found in action manager.") + # Get the stage + stage = omni.usd.get_context().get_stage() + camera_prim = stage.GetPrimAtPath(camera_path) - # Check if it's an operational space controller action - if not hasattr(action_term, "_osc") or not hasattr(action_term._osc, "cfg"): - raise ValueError(f"Action term '{action_name}' does not appear to be an operational space controller.") + if not camera_prim.IsValid(): + continue - controller = action_term._osc + # === Randomize Position === + pos_delta_x = random.uniform(*position_deltas["x"]) + pos_delta_y = random.uniform(*position_deltas["y"]) + pos_delta_z = random.uniform(*position_deltas["z"]) - # Check distribution type - if operation != "scale": - raise ValueError(f"Operation '{operation}' not supported. Only 'scale' is supported.") - if distribution not in ["uniform", "log_uniform"]: - raise ValueError( - f"Distribution '{distribution}' not supported. Only 'uniform' and 'log_uniform' are supported." 
- ) + new_pos = (base_position[0] + pos_delta_x, base_position[1] + pos_delta_y, base_position[2] + pos_delta_z) - # Sample random multipliers for stiffness (xyz and ypr separately) - if distribution == "uniform": - stiff_xyz_multiplier = ( - torch.rand(len(env_ids), device=env.device) - * (stiffness_distribution_params[1] - stiffness_distribution_params[0]) - + stiffness_distribution_params[0] - ) + # === Randomize Rotation (Euler deltas in degrees, convert to radians) === + # Convert base quaternion (w, x, y, z) to GfQuatf + base_quat = Gf.Quatf(base_rotation[0], Gf.Vec3f(base_rotation[1], base_rotation[2], base_rotation[3])) + base_rot = Gf.Rotation(base_quat) - stiff_rpy_multiplier = ( - torch.rand(len(env_ids), device=env.device) - * (stiffness_distribution_params[1] - stiffness_distribution_params[0]) - + stiffness_distribution_params[0] - ) - else: # log_uniform - log_min_stiff = torch.log(torch.tensor(stiffness_distribution_params[0], device=env.device)) - log_max_stiff = torch.log(torch.tensor(stiffness_distribution_params[1], device=env.device)) + # Create delta rotation from Euler angles (ZYX order: yaw, pitch, roll) + delta_pitch = random.uniform(*euler_deltas["pitch"]) + delta_yaw = random.uniform(*euler_deltas["yaw"]) + delta_roll = random.uniform(*euler_deltas["roll"]) - stiff_xyz_multiplier = torch.exp( - torch.rand(len(env_ids), device=env.device) * (log_max_stiff - log_min_stiff) + log_min_stiff + delta_rot = ( + Gf.Rotation(Gf.Vec3d(0, 0, 1), delta_yaw) + * Gf.Rotation(Gf.Vec3d(0, 1, 0), delta_pitch) + * Gf.Rotation(Gf.Vec3d(1, 0, 0), delta_roll) ) - stiff_rpy_multiplier = torch.exp( - torch.rand(len(env_ids), device=env.device) * (log_max_stiff - log_min_stiff) + log_min_stiff - ) + # Apply delta rotation to base rotation + new_rot = delta_rot * base_rot + new_quat = new_rot.GetQuat() + + # === Apply pose to the USD prim === + xform = UsdGeom.Xformable(camera_prim) + xform_ops = xform.GetOrderedXformOps() + + if not xform_ops: + 
xform.AddTransformOp() + + # Set translation and orientation + xform_ops = xform.GetOrderedXformOps() + for op in xform_ops: + if op.GetOpType() == UsdGeom.XformOp.TypeTranslate: + op.Set(Gf.Vec3d(*new_pos)) + elif op.GetOpType() == UsdGeom.XformOp.TypeOrient: + op.Set(new_quat) + + +def randomize_camera_focal_length( + env, env_ids: torch.Tensor, camera_path_template: str, focal_length_range: tuple = (0.8, 1.8) +) -> None: + """Randomizes the focal length of cameras.""" + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device="cpu") + else: + env_ids = env_ids.cpu() - # Sample random multipliers for damping (xyz and ypr separately) - if distribution == "uniform": - damp_xyz_multiplier = ( - torch.rand(len(env_ids), device=env.device) - * (damping_distribution_params[1] - damping_distribution_params[0]) - + damping_distribution_params[0] + stage = omni.usd.get_context().get_stage() + + for env_idx in env_ids: + camera_path = camera_path_template.format(env_idx) + camera_prim = stage.GetPrimAtPath(camera_path) + if not camera_prim.IsValid(): + continue + + focal_length = random.uniform(focal_length_range[0], focal_length_range[1]) + focal_attr = camera_prim.GetAttribute("focalLength") + if focal_attr.IsValid(): + focal_attr.Set(focal_length) + + +class randomize_arm_from_sysid(ManagerTermBase): + """Randomize arm joint dynamics around sysid nominal values. + + Sysid parameters (armature, friction, etc.) are loaded from ``metadata.yaml`` + next to the robot USD. ``scale_range = (lo, hi)`` scales each nominal: + ``nominal * uniform(lo, hi)`` per env per joint. + + When used with ADR, ``scale_progress`` (0→1) linearly interpolates armature, + friction, and motor delay from 0 to the full sysid-randomized values. 
+ """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.robot: Articulation = env.scene[self.asset_cfg.name] + self.joint_ids = self.robot.find_joints(cfg.params["joint_names"])[0] + self.actuator_name: str = cfg.params["actuator_name"] + + # Load sysid from robot metadata (co-located with USD) + metadata = utils.read_metadata_from_usd_directory(self.robot.cfg.spawn.usd_path) + sysid = metadata["sysid"] + self.armature = sysid["armature"] + self.static_friction = sysid["static_friction"] + self.dynamic_ratio = sysid["dynamic_ratio"] + self.viscous_friction = sysid["viscous_friction"] + + # ADR progress: 0 = armature/friction are 0, 1 = full sysid randomization + self.scale_progress: float = cfg.params.get("initial_scale_progress", 0.0) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + asset_cfg: SceneEntityCfg, + joint_names: list[str], + actuator_name: str, + scale_range: tuple[float, float] = (0.8, 1.2), + delay_range: tuple[int, int] = (0, 2), + initial_scale_progress: float = 0.0, + ): + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=self.robot.device) + N = len(env_ids) + n_joints = len(self.joint_ids) + lo, hi = scale_range + device = self.robot.device + p = self.scale_progress + + def _scale(nominal): + val = torch.as_tensor(nominal, device=device, dtype=torch.float32) + return val * (lo + torch.rand(N, n_joints, device=device) * (hi - lo)) + + # Armature and friction: scaled by ADR progress (0 → sysid) + arm_vals = _scale(self.armature) * p + sfric_vals = _scale(self.static_friction) * p + dratio_vals = _scale(self.dynamic_ratio) * p + dfric_vals = torch.minimum(dratio_vals * sfric_vals, sfric_vals) + vfric_vals = _scale(self.viscous_friction) * p + + self.robot.write_joint_armature_to_sim(arm_vals, joint_ids=self.joint_ids, env_ids=env_ids) + self.robot.write_joint_friction_coefficient_to_sim( + 
sfric_vals, + joint_dynamic_friction_coeff=dfric_vals, + joint_viscous_friction_coeff=vfric_vals, + joint_ids=self.joint_ids, + env_ids=env_ids, ) - damp_rpy_multiplier = ( - torch.rand(len(env_ids), device=env.device) - * (damping_distribution_params[1] - damping_distribution_params[0]) - + damping_distribution_params[0] + # Motor delay scaled by ADR progress (if actuator supports it) + delay_lo, delay_hi = delay_range + actuator = self.robot.actuators[self.actuator_name] + if hasattr(actuator, "positions_delay_buffer"): + effective_hi = int(round(p * delay_hi)) + effective_lo = min(delay_lo, effective_hi) + delays = torch.randint(effective_lo, effective_hi + 1, (N,), device=device, dtype=torch.int) + actuator.positions_delay_buffer.set_time_lag(delays, env_ids) + actuator.velocities_delay_buffer.set_time_lag(delays, env_ids) + actuator.efforts_delay_buffer.set_time_lag(delays, env_ids) + + +class randomize_arm_from_sysid_fixed(randomize_arm_from_sysid): + """Same as randomize_arm_from_sysid but always applies scale_range (no curriculum).""" + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self.scale_progress = 1.0 + + +class randomize_gripper_from_sysid(ManagerTermBase): + """Randomize gripper dynamics around sysid nominal values. + + Each parameter is a nominal scalar. + ``scale_range = (lo, hi)`` scales it: ``nominal * uniform(lo, hi)`` per env. + + When used with ADR, ``scale_progress`` (0→1): + - Armature/friction: interpolate from 0 to sysid × U(scale_range). + - Stiffness/damping: interpolate from ``initial_stiffness``/``initial_damping`` + (sim defaults) to sysid × U(scale_range). 
+ """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self.asset_cfg: SceneEntityCfg = cfg.params["asset_cfg"] + self.robot: Articulation = env.scene[self.asset_cfg.name] + self.gripper_joint_ids = self.robot.find_joints(cfg.params["joint_names"])[0] + self.actuator_name: str = cfg.params["actuator_name"] + # ADR progress: 0 = initial (defaults), 1 = full sysid randomization + self.scale_progress: float = 0.0 + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + asset_cfg: SceneEntityCfg, + joint_names: list[str], + actuator_name: str, + stiffness: float, + damping: float, + armature: float, + friction: float, + scale_range: tuple[float, float] = (0.8, 1.2), + initial_stiffness: float | None = None, + initial_damping: float | None = None, + ): + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=self.robot.device) + N = len(env_ids) + lo, hi = scale_range + device = self.robot.device + p = self.scale_progress + + def _scale(nominal): + return nominal * (lo + torch.rand(N, 1, device=device) * (hi - lo)) + + # Stiffness/damping: interpolate from initial defaults to sysid × U(scale_range) + target_stiff = _scale(stiffness) + target_damp = _scale(damping) + if initial_stiffness is not None and initial_damping is not None: + stiff_vals = initial_stiffness + p * (target_stiff - initial_stiffness) + damp_vals = initial_damping + p * (target_damp - initial_damping) + else: + stiff_vals = target_stiff + damp_vals = target_damp + # Armature and friction: scaled by ADR progress (0 → sysid) + arm_vals = _scale(armature) * p + fric_vals = _scale(friction) * p + + gripper_actuator = self.robot.actuators[self.actuator_name] + gripper_actuator.stiffness[env_ids] = stiff_vals + gripper_actuator.damping[env_ids] = damp_vals + self.robot.write_joint_stiffness_to_sim(stiff_vals, joint_ids=self.gripper_joint_ids, env_ids=env_ids) + self.robot.write_joint_damping_to_sim(damp_vals, 
joint_ids=self.gripper_joint_ids, env_ids=env_ids) + self.robot.write_joint_armature_to_sim(arm_vals, joint_ids=self.gripper_joint_ids, env_ids=env_ids) + self.robot.write_joint_friction_coefficient_to_sim(fric_vals, joint_ids=self.gripper_joint_ids, env_ids=env_ids) + + +class randomize_rel_cartesian_osc_gains(ManagerTermBase): + """Randomize RelCartesianOSCAction Kp/Kd gains. + + XYZ and RPY components are sampled independently (one scalar each). + ``scale_range = (lo, hi)`` scales the target Kp: ``target_kp * uniform(lo, hi)``. + + When used with ADR, ``scale_progress`` (0→1) interpolates from the action + config's default Kp/damping_ratio (initial) to ``terminal_kp``/ + ``terminal_damping_ratio``, with U(scale_range) randomization applied + to the terminal values. If no terminal params are given, randomizes + around the action config defaults directly. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self._action_name: str = cfg.params["action_name"] + self._action_term = None + # ADR progress: 0 = action defaults (initial), 1 = terminal gains + self.scale_progress: float = cfg.params.get("initial_scale_progress", 0.0) + + def _resolve_action_term(self): + if self._action_term is not None: + return + from .actions.task_space_actions import RelCartesianOSCAction + + action_term = self._env.action_manager._terms.get(self._action_name) + if action_term is None or not isinstance(action_term, RelCartesianOSCAction): + raise ValueError(f"Action term '{self._action_name}' is not a RelCartesianOSCAction.") + self._action_term = action_term + + def __call__( + self, + env: ManagerBasedEnv, + env_ids, + action_name: str, + scale_range: tuple[float, float] = (0.8, 1.2), + terminal_kp: tuple[float, ...] | None = None, + terminal_damping_ratio: tuple[float, ...] 
| None = None, + initial_scale_progress: float = 0.0, + ) -> None: + self._resolve_action_term() + + if env_ids is None: + env_ids = torch.arange(env.scene.num_envs, device=env.device) + + lo, hi = scale_range + n = len(env_ids) + p = self.scale_progress + + s_xyz = lo + torch.rand(n, 1, device=env.device) * (hi - lo) + s_rpy = lo + torch.rand(n, 1, device=env.device) * (hi - lo) + s_dr_xyz = lo + torch.rand(n, 1, device=env.device) * (hi - lo) + s_dr_rpy = lo + torch.rand(n, 1, device=env.device) * (hi - lo) + + kp_default = self._action_term._kp_default # (6,) + dr_default = self._action_term._damping_ratio_default # (6,) + + if terminal_kp is not None and terminal_damping_ratio is not None: + # Terminal Kp with randomization + kp_term = torch.tensor(terminal_kp, device=env.device, dtype=torch.float32) + target_kp = kp_term.unsqueeze(0).repeat(n, 1) + target_kp[:, :3] *= s_xyz + target_kp[:, 3:] *= s_rpy + + dr_term = torch.tensor(terminal_damping_ratio, device=env.device, dtype=torch.float32) + target_dr = dr_term.unsqueeze(0).repeat(n, 1) + target_dr[:, :3] *= s_dr_xyz + target_dr[:, 3:] *= s_dr_rpy + + # Interpolate from action defaults (initial) to terminal + init_kp = kp_default.unsqueeze(0) + init_dr = dr_default.unsqueeze(0) + new_kp = init_kp + p * (target_kp - init_kp) + new_dr = init_dr + p * (target_dr - init_dr) + else: + # No terminal specified — randomize around action defaults + new_kp = kp_default.unsqueeze(0).repeat(n, 1) + new_kp[:, :3] *= s_xyz + new_kp[:, 3:] *= s_rpy + new_dr = dr_default.unsqueeze(0).repeat(n, 1) + new_dr[:, :3] *= s_dr_xyz + new_dr[:, 3:] *= s_dr_rpy + + self._action_term._kp[env_ids] = new_kp + self._action_term._kd[env_ids] = 2.0 * torch.sqrt(new_kp) * new_dr + + +class randomize_rel_cartesian_osc_gains_fixed(randomize_rel_cartesian_osc_gains): + """Same as randomize_rel_cartesian_osc_gains but always applies scale_range (no curriculum).""" + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + 
super().__init__(cfg, env) + self.scale_progress = 1.0 + + +class adr_sysid_curriculum(ManagerTermBase): + """Automatic Domain Randomization curriculum for sysid event terms. + + Monitors the mean success rate from ``MultiResetManager``'s ``SuccessMonitor`` + and linearly ramps the ``scale_progress`` attribute of the target event terms + from 0 (no friction/armature) to 1 (full sysid randomization). + + Updates are gated by ``update_every_n_steps`` (env steps via ``common_step_counter``) + to ensure the update rate is independent of the number of environments. + + When success_rate > ``success_threshold_up``, ``scale_progress`` increases by ``delta``. + When success_rate < ``success_threshold_down``, ``scale_progress`` decreases by ``delta``. + + If ``warmup_success_threshold`` is set, the bang-bang controller is suppressed + until mean success rate reaches this threshold (latching: once warmed up, stays + warmed up even if success later dips). + """ + + def __init__(self, cfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self._event_term_names: list[str] = cfg.params["event_term_names"] + self._reset_event_name: str = cfg.params["reset_event_name"] + self._initial_scale_progress: float = cfg.params.get("initial_scale_progress", 0.0) + self._warmup_threshold: float | None = cfg.params.get("warmup_success_threshold") + self._warmed_up: bool = self._warmup_threshold is None + # Cache references to the event term instances (populated lazily) + self._event_terms: list = [] + self._reset_term: object | None = None + self._resolved = False + # Step-gated update tracking + self._last_update_step: int = -1 + self._last_state: dict[str, float] = { + "scale_progress": self._initial_scale_progress, + "mean_success_rate": 0.0, + } + + def _resolve_terms(self): + """Lazily resolve event term references (event manager may not be ready at __init__).""" + if self._resolved: + return + self._resolved = True + em = self._env.event_manager + self._event_terms = [] + for name 
in self._event_term_names: + term_cfg = em.get_term_cfg(name) + self._event_terms.append(term_cfg.func) + reset_cfg = em.get_term_cfg(self._reset_event_name) + self._reset_term = reset_cfg.func + if self._initial_scale_progress > 0.0: + for term in self._event_terms: + term.scale_progress = max(term.scale_progress, self._initial_scale_progress) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids, + event_term_names: list[str], + reset_event_name: str, + success_threshold_up: float = 0.7, + success_threshold_down: float = 0.3, + delta: float = 0.01, + update_every_n_steps: int = 160, + initial_scale_progress: float = 0.0, + warmup_success_threshold: float | None = None, + ) -> dict[str, float]: + self._resolve_terms() + + # Only update once every N env steps (agnostic to num_envs) + current_step = env.common_step_counter + if (current_step - self._last_update_step) < update_every_n_steps: + return self._last_state + self._last_update_step = current_step + + # Get mean success rate across all tasks + if not hasattr(self._reset_term, "success_monitor"): + self._last_state = {"scale_progress": self._event_terms[0].scale_progress if self._event_terms else 0.0} + return self._last_state + + success_rates = self._reset_term.success_monitor.get_success_rate() + mean_success = success_rates.mean().item() + + # Warmup gate: hold scale_progress until success exceeds threshold + if not self._warmed_up: + if mean_success >= self._warmup_threshold: + self._warmed_up = True + else: + self._last_state = { + "scale_progress": self._event_terms[0].scale_progress if self._event_terms else 0.0, + "mean_success_rate": mean_success, + } + return self._last_state + + # Update scale_progress based on thresholds + current_progress = self._event_terms[0].scale_progress if self._event_terms else 0.0 + if mean_success > success_threshold_up: + current_progress = min(1.0, current_progress + delta) + elif mean_success < success_threshold_down: + current_progress = max(0.0, 
current_progress - delta) + + # Apply to all target event terms + for term in self._event_terms: + term.scale_progress = current_progress + + self._last_state = { + "scale_progress": current_progress, + "mean_success_rate": mean_success, + } + return self._last_state + + +class action_scale_curriculum(ManagerTermBase): + """Curriculum that gradually tightens action scales on the OSC action term. + + Linearly interpolates the per-axis ``_scale`` tensor from ``initial_scales`` + to ``target_scales`` as progress goes from 0 to 1. This limits the maximum + per-step EE motion without saturating the PD controller (unlike pose-error + clipping), preserving gradient signal for RL. + + Uses the same success-rate monitoring as ``adr_sysid_curriculum``: progress + increases when success_rate > ``success_threshold_up`` and decreases when + < ``success_threshold_down``. + """ + + def __init__(self, cfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self._action_name: str = cfg.params["action_name"] + self._reset_event_name: str = cfg.params["reset_event_name"] + self._action_term = None + self._reset_term = None + self._resolved = False + self._last_update_step: int = -1 + self._progress: float = cfg.params.get("initial_progress", 0.0) + self._last_state: dict[str, float] = {"scale_progress": self._progress, "mean_success_rate": 0.0} + + def _resolve(self): + if self._resolved: + return + self._resolved = True + from .actions.task_space_actions import RelCartesianOSCAction + + action_term = self._env.action_manager._terms.get(self._action_name) + if action_term is None or not isinstance(action_term, RelCartesianOSCAction): + raise ValueError(f"Action term '{self._action_name}' is not a RelCartesianOSCAction.") + self._action_term = action_term + + em = self._env.event_manager + reset_cfg = em.get_term_cfg(self._reset_event_name) + self._reset_term = reset_cfg.func + + def __call__( + self, + env: ManagerBasedEnv, + env_ids, + action_name: str, + reset_event_name: str, + 
target_scales: list[float], + initial_scales: list[float], + success_threshold_up: float = 0.7, + success_threshold_down: float = 0.3, + delta: float = 0.005, + update_every_n_steps: int = 200, + initial_progress: float = 0.0, + ) -> dict[str, float]: + self._resolve() + + current_step = env.common_step_counter + if (current_step - self._last_update_step) < update_every_n_steps: + return self._last_state + self._last_update_step = current_step + + if not hasattr(self._reset_term, "success_monitor"): + self._last_state = {"scale_progress": self._progress, "mean_success_rate": 0.0} + return self._last_state + + success_rates = self._reset_term.success_monitor.get_success_rate() + mean_success = success_rates.mean().item() + + if mean_success > success_threshold_up: + self._progress = min(1.0, self._progress + delta) + elif mean_success < success_threshold_down: + self._progress = max(0.0, self._progress - delta) + + initial = torch.tensor(initial_scales, device=env.device, dtype=torch.float32) + target = torch.tensor(target_scales, device=env.device, dtype=torch.float32) + effective = initial + self._progress * (target - initial) + + self._action_term._scale = effective + + self._last_state = { + "scale_progress": self._progress, + "mean_success_rate": mean_success, + } + return self._last_state + + +class obs_noise_curriculum(ManagerTermBase): + """Curriculum that gradually increases uniform noise on observation terms. + + Monitors success rate and linearly ramps the half-range on the specified + observation terms' ``AdditiveUniformNoiseCfg`` from ``initial_half_range`` + to ``target_half_range`` as progress goes from 0 to 1. At full progress + the noise is U(-target_half_range, +target_half_range). 
+ """ + + def __init__(self, cfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self._obs_group: str = cfg.params["obs_group"] + self._obs_term_names: list[str] = cfg.params["obs_term_names"] + self._reset_event_name: str = cfg.params["reset_event_name"] + self._reset_term = None + self._obs_term_cfgs: list = [] + self._resolved = False + self._last_update_step: int = -1 + self._progress: float = 0.0 + self._last_state: dict[str, float] = {"scale_progress": 0.0, "mean_success_rate": 0.0} + + def _resolve(self): + if self._resolved: + return + self._resolved = True + om = self._env.observation_manager + term_names = om._group_obs_term_names[self._obs_group] + term_cfgs = om._group_obs_term_cfgs[self._obs_group] + name_to_cfg = dict(zip(term_names, term_cfgs)) + for name in self._obs_term_names: + if name not in name_to_cfg: + raise ValueError(f"Obs term '{name}' not found in group '{self._obs_group}'. Available: {term_names}") + cfg = name_to_cfg[name] + if cfg.noise is None: + raise ValueError( + f"Obs term '{name}' has no noise config. Set noise=AdditiveUniformNoiseCfg(n_min=0.0, n_max=0.0)." 
+ ) + self._obs_term_cfgs.append(cfg) + + em = self._env.event_manager + reset_cfg = em.get_term_cfg(self._reset_event_name) + self._reset_term = reset_cfg.func + + def __call__( + self, + env: ManagerBasedEnv, + env_ids, + obs_group: str, + obs_term_names: list[str], + reset_event_name: str, + target_half_range: float, + initial_half_range: float = 0.0, + success_threshold_up: float = 0.7, + success_threshold_down: float = 0.3, + delta: float = 0.005, + update_every_n_steps: int = 200, + ) -> dict[str, float]: + self._resolve() + + current_step = env.common_step_counter + if (current_step - self._last_update_step) < update_every_n_steps: + return self._last_state + self._last_update_step = current_step + + if not hasattr(self._reset_term, "success_monitor"): + self._last_state = {"scale_progress": self._progress, "mean_success_rate": 0.0} + return self._last_state + + success_rates = self._reset_term.success_monitor.get_success_rate() + mean_success = success_rates.mean().item() + + if mean_success > success_threshold_up: + self._progress = min(1.0, self._progress + delta) + elif mean_success < success_threshold_down: + self._progress = max(0.0, self._progress - delta) + + effective_hr = initial_half_range + self._progress * (target_half_range - initial_half_range) + for term_cfg in self._obs_term_cfgs: + term_cfg.noise.n_min = -effective_hr + term_cfg.noise.n_max = effective_hr + + self._last_state = { + "scale_progress": self._progress, + "mean_success_rate": mean_success, + } + return self._last_state + + +class randomize_visual_appearance_multiple_meshes(ManagerTermBase): + """Randomize the visual appearance (texture or color) of multiple mesh bodies using Replicator API. + + This unified function can randomize either textures or solid colors on mesh bodies. 
+ Use ``texture_prob`` to control the probability of applying textures vs solid colors: + - ``texture_prob=1.0``: Always use textures (default) + - ``texture_prob=0.0``: Always use solid colors + - ``0 < texture_prob < 1``: Randomly choose between texture and color each reset + + Texture paths can be provided via: + 1. ``texture_paths`` parameter (list of full paths) + 2. ``texture_config_path`` parameter (path to a YAML file) + + Colors can be specified as: + 1. A dict with ``r``, ``g``, ``b`` keys mapping to (low, high) ranges + 2. A list of RGB tuples to choose from + + Parameters: + - ``texture_prob``: Probability of using texture vs color (default 1.0 = always texture) + - ``colors``: Color specification for solid color mode + - ``diffuse_tint_range``: RGB tint multiplier for texture mode, e.g. ((0.8, 0.8, 0.8), (1.0, 1.0, 1.0)) + + .. note:: + Requires :attr:`isaaclab.scene.InteractiveSceneCfg.replicate_physics` to be False. + """ + + def __init__(self, cfg: EventTermCfg, env: ManagerBasedEnv): + """Initialize the randomization term.""" + super().__init__(cfg, env) + + from isaacsim.core.utils.extensions import enable_extension + + enable_extension("omni.replicator.core") + import omni.replicator.core as rep + + # read parameters from the configuration + asset_cfg: SceneEntityCfg = cfg.params.get("asset_cfg") + texture_paths = cfg.params.get("texture_paths") + texture_config_path = cfg.params.get("texture_config_path") + event_name = cfg.params.get("event_name") + mesh_names: list[str] = cfg.params.get("mesh_names", []) + + # Core parameters + self.texture_prob = cfg.params.get("texture_prob", 1.0) # 1.0 = always texture, 0.0 = always color + self.diffuse_tint_range = cfg.params.get("diffuse_tint_range") # ((r,g,b), (r,g,b)) + self.colors = cfg.params.get("colors", {"r": (0.0, 1.0), "g": (0.0, 1.0), "b": (0.0, 1.0)}) + self.color_event_name = f"{event_name}_color" + + # Material property ranges (DEXTRAH-aligned defaults) + self._texture_scale_range = 
cfg.params.get("texture_scale_range", (0.7, 5.0)) + self._roughness_range = cfg.params.get("roughness_range", (0.0, 1.0)) + self._metallic_range = cfg.params.get("metallic_range", (0.0, 1.0)) + self._specular_range = cfg.params.get("specular_range", (0.0, 1.0)) + + # Load texture paths from YAML config if provided + if texture_config_path is not None: + texture_paths = utils.load_asset_paths_from_config( + texture_config_path, cache_subdir="textures", skip_validation=False + ) + logging.info(f"[{event_name}] Loaded {len(texture_paths)} texture paths.") + if self.texture_prob > 0 and (texture_paths is None or len(texture_paths) == 0): + raise RuntimeError( + f"[{event_name}] texture_prob={self.texture_prob} but no texture paths loaded. " + f"Check texture_config_path={texture_config_path}" + ) + if texture_paths: + non_local = [p for p in texture_paths if not p.startswith("/")] + if non_local: + raise RuntimeError( + f"[{event_name}] {len(non_local)} texture paths are non-local (Nucleus) " + "and will silently fail if Nucleus is unreachable. " + f"First 3: {non_local[:3]}. " + "Use only local/cloud-cached textures." + ) + missing = [p for p in texture_paths if not os.path.exists(p)] + if missing: + raise RuntimeError( + f"[{event_name}] {len(missing)}/{len(texture_paths)} texture files missing on disk. " + f"First 3: {missing[:3]}" + ) + + # check to make sure replicate_physics is set to False + if env.cfg.scene.replicate_physics: + raise RuntimeError( + "Unable to randomize visual appearance with scene replication enabled." + " Please set 'replicate_physics' to False in 'InteractiveSceneCfg'." 
+ ) + + # obtain the asset entity + asset = env.scene[asset_cfg.name] + asset_prim_path = asset.cfg.prim_path + + # create the affected prim path pattern + if len(mesh_names) == 0: + pattern_with_visuals = f"{asset_prim_path}/.*/visuals" + matching_prims = sim_utils.find_matching_prim_paths(pattern_with_visuals) + if matching_prims: + prim_path_pattern = pattern_with_visuals + else: + prim_path_pattern = f"{asset_prim_path}/.*" + carb.log_info( + f"Pattern '{pattern_with_visuals}' found no prims. Falling back to '{prim_path_pattern}'." + ) + else: + mesh_prim_paths = [] + for mesh_name in mesh_names: + if not mesh_name.startswith("/"): + mesh_name = "/" + mesh_name + mesh_prim_paths.append(f"{asset_prim_path}{mesh_name}") + prim_path_pattern = "|".join(mesh_prim_paths) + + # Store texture paths and RNG + self.texture_paths = texture_paths + unique_seed = hash(event_name) % (2**31) + self.texture_rng = rep.rng.ReplicatorRNG(seed=unique_seed) + self.prim_path_pattern = prim_path_pattern + + # Get prims and create materials + stage = sim_utils.SimulationContext.instance().stage + prims_group = rep.functional.get.prims(path_pattern=prim_path_pattern, stage=stage) + num_prims = len(prims_group) + + if num_prims == 0: + raise RuntimeError( + f"[randomize_visual_appearance_multiple_meshes] No prims found matching: {prim_path_pattern}. " + "Check mesh_names and asset_cfg." 
+ ) + + # Disable instanceable on prims + for prim in prims_group: + if prim.IsInstanceable(): + prim.SetInstanceable(False) + + # Create OmniPBR materials and bind them to the prims + self.material_prims = rep.functional.create_batch.material( + mdl="OmniPBR.mdl", bind_prims=prims_group, count=num_prims, project_uvw=True ) - else: # log_uniform - log_min_damp = torch.log(torch.tensor(damping_distribution_params[0], device=env.device)) - log_max_damp = torch.log(torch.tensor(damping_distribution_params[1], device=env.device)) + self._stage = stage + self._texture_verified = False + + # Cache shader prims for direct USD access (avoids Replicator pipeline race conditions) + from pxr import Sdf, UsdShade + + self._shader_prims = [] + for i, mat_prim in enumerate(self.material_prims): + mat_path = str(mat_prim.GetPath()) if hasattr(mat_prim, "GetPath") else str(mat_prim) + shader_prim = stage.GetPrimAtPath(Sdf.Path(f"{mat_path}/Shader")) + if not shader_prim.IsValid(): + raise RuntimeError(f"[{event_name}] Shader not found at {mat_path}/Shader after material creation.") + self._shader_prims.append(shader_prim) + + # Force direct USD material binding (Replicator bind_prims can silently fail) + material = UsdShade.Material(mat_prim) + target_prim = prims_group[i] + UsdShade.MaterialBindingAPI.Apply(target_prim) + UsdShade.MaterialBindingAPI(target_prim).Bind(material, UsdShade.Tokens.strongerThanDescendants) + + # Ensure material property inputs exist on each shader + _required_inputs = { + "texture_scale": Sdf.ValueTypeNames.Float2, + "reflection_roughness_constant": Sdf.ValueTypeNames.Float, + "metallic_constant": Sdf.ValueTypeNames.Float, + "specular_level": Sdf.ValueTypeNames.Float, + } + for shader_prim in self._shader_prims: + shader = UsdShade.Shader(shader_prim) + props = shader_prim.GetPropertyNames() + for attr_name, attr_type in _required_inputs.items(): + if f"inputs:{attr_name}" not in props: + shader.CreateInput(attr_name, attr_type) + + # Parse color 
config for direct USD color generation + if isinstance(self.colors, dict): + self._color_low = np.array([self.colors[key][0] for key in ["r", "g", "b"]]) + self._color_high = np.array([self.colors[key][1] for key in ["r", "g", "b"]]) + else: + self._color_list = list(self.colors) + self._color_low = None + self._color_high = None + + # Apply initial randomization so envs don't start with default appearance + self(env, torch.arange(env.num_envs, device=env.device), **cfg.params) + + def __call__( + self, + env: ManagerBasedEnv, + env_ids: torch.Tensor, + event_name: str, + asset_cfg: SceneEntityCfg, + texture_paths: list[str] | None = None, + texture_config_path: str | None = None, + mesh_names: list[str] = [], + texture_prob: float = 1.0, + colors: list[tuple[float, float, float]] | dict[str, tuple[float, float]] | None = None, + diffuse_tint_range: tuple[tuple[float, float, float], tuple[float, float, float]] | None = None, + texture_scale_range: tuple[float, float] | None = None, + roughness_range: tuple[float, float] | None = None, + metallic_range: tuple[float, float] | None = None, + specular_range: tuple[float, float] | None = None, + ): + if not self._shader_prims: + return + + from pxr import Sdf + + rng = self.texture_rng.generator + num_prims = len(self._shader_prims) + + # Per-prim texture vs color decision + use_texture_mask = rng.random(size=num_prims) < self.texture_prob + + # Pre-generate random material properties (shared by both modes) + rand_roughness = rng.uniform(self._roughness_range[0], self._roughness_range[1], size=num_prims) + rand_metallic = rng.uniform(self._metallic_range[0], self._metallic_range[1], size=num_prims) + rand_specular = rng.uniform(self._specular_range[0], self._specular_range[1], size=num_prims) + + # Pre-generate texture-mode data + random_textures = None + if self.texture_paths and use_texture_mask.any(): + random_textures = rng.choice(self.texture_paths, size=num_prims) + for tex_path in random_textures: + if 
tex_path.startswith("/") and not os.path.exists(tex_path): + raise RuntimeError( + f"[randomize_visual_appearance] Texture file not found: {tex_path}. " + "Local texture paths must exist on disk." + ) + + # Pre-generate color-mode data + random_colors = None + if not use_texture_mask.all(): + if self._color_low is not None: + random_colors = rng.uniform(self._color_low, self._color_high, size=(num_prims, 3)) + else: + indices = rng.integers(0, len(self._color_list), size=num_prims) + random_colors = np.array([self._color_list[i] for i in indices]) + + n_tex = int(use_texture_mask.sum()) + n_col = num_prims - n_tex + logging.debug(f"[{event_name}] {n_tex} TEXTURE / {n_col} COLOR -> {num_prims} prims") + + with Sdf.ChangeBlock(): + for i, shader_prim in enumerate(self._shader_prims): + # Material properties (both modes) + shader_prim.GetAttribute("inputs:reflection_roughness_constant").Set(float(rand_roughness[i])) + shader_prim.GetAttribute("inputs:metallic_constant").Set(float(rand_metallic[i])) + shader_prim.GetAttribute("inputs:specular_level").Set(float(rand_specular[i])) + + if use_texture_mask[i] and random_textures is not None: + shader_prim.GetAttribute("inputs:diffuse_texture").Set(Sdf.AssetPath(random_textures[i])) + s = float(rng.uniform(self._texture_scale_range[0], self._texture_scale_range[1])) + shader_prim.GetAttribute("inputs:texture_scale").Set(Gf.Vec2f(s, s)) + + if self.diffuse_tint_range is not None: + t = rng.uniform(self.diffuse_tint_range[0], self.diffuse_tint_range[1], size=3) + shader_prim.GetAttribute("inputs:diffuse_tint").Set( + Gf.Vec3f(float(t[0]), float(t[1]), float(t[2])) + ) + else: + shader_prim.GetAttribute("inputs:diffuse_texture").Set(Sdf.AssetPath("")) + if random_colors is not None: + shader_prim.GetAttribute("inputs:diffuse_color_constant").Set( + Gf.Vec3f(float(random_colors[i][0]), float(random_colors[i][1]), float(random_colors[i][2])) + ) - damp_xyz_multiplier = torch.exp( - torch.rand(len(env_ids), device=env.device) * 
(log_max_damp - log_min_damp) + log_min_damp + if not self._texture_verified and random_textures is not None and use_texture_mask.any(): + first_tex_idx = int(np.argmax(use_texture_mask)) + self._verify_texture_applied(random_textures[first_tex_idx], event_name) + self._texture_verified = True + + def _verify_texture_applied(self, expected_texture: str, event_name: str): + """One-time check that textures are actually being applied by reading back from USD.""" + shader_prim = self._shader_prims[0] + shader_path = str(shader_prim.GetPath()) + tex_attr = shader_prim.GetAttribute("inputs:diffuse_texture") + if not tex_attr or not tex_attr.IsValid(): + raise RuntimeError( + f"[{event_name}] Texture verification failed: 'inputs:diffuse_texture' attribute " + f"not found on {shader_path}." + ) + current_val = tex_attr.Get() + logging.debug( + f"[{event_name}] Texture verify: shader={shader_path}, value={current_val}, expected={expected_texture}" ) + if current_val is None or str(current_val) == "": + raise RuntimeError( + f"[{event_name}] Texture verification failed: diffuse_texture is empty after " + f"USD Set. Expected: {expected_texture}." + ) + + +class implicit_to_explicit_swap(ManagerTermBase): + """One-shot curriculum that swaps the arm actuator from ImplicitActuator to + an explicit actuator (e.g. DelayedDCMotor) once the ADR sysid curriculum + reaches ``scale_progress == 1.0``. + + After the swap, ``randomize_arm_from_sysid`` (which looks up + ``robot.actuators[actuator_name]`` each call) will automatically pick up + the new explicit actuator and start setting delay buffers. + + Set ``swap_at_init=True`` to trigger the swap on the first call regardless + of ``scale_progress`` (useful when resuming from a checkpoint where the + swap had already occurred). 
+ """ - damp_rpy_multiplier = torch.exp( - torch.rand(len(env_ids), device=env.device) * (log_max_damp - log_min_damp) + log_min_damp + def __init__(self, cfg, env: ManagerBasedEnv): + super().__init__(cfg, env) + self._swapped = False + self._swap_at_init: bool = cfg.params.get("swap_at_init", False) + self._robot: Articulation = env.scene[cfg.params["asset_cfg"].name] + self._actuator_name: str = cfg.params["actuator_name"] + self._explicit_arm_cfg = cfg.params["explicit_arm_cfg"] + self._sysid_event_name: str = cfg.params["sysid_event_name"] + self._sysid_term = None + self._resolved = False + + def _resolve(self): + if self._resolved: + return + self._resolved = True + em = self._env.event_manager + term_cfg = em.get_term_cfg(self._sysid_event_name) + self._sysid_term = term_cfg.func + + def _do_swap(self, env: ManagerBasedEnv) -> dict[str, object]: + old_actuator = self._robot.actuators[self._actuator_name] + new_actuator = self._explicit_arm_cfg.class_type( + cfg=self._explicit_arm_cfg, + joint_names=old_actuator.joint_names, + joint_ids=old_actuator.joint_indices, + num_envs=self._robot.num_instances, + device=self._robot.device, + ) + self._robot.actuators[self._actuator_name] = new_actuator + joint_ids = old_actuator.joint_indices + self._robot.write_joint_stiffness_to_sim(0.0, joint_ids=joint_ids) + self._robot.write_joint_damping_to_sim(0.0, joint_ids=joint_ids) + self._robot.write_joint_effort_limit_to_sim(1.0e9, joint_ids=joint_ids) + self._robot.write_joint_velocity_limit_to_sim(new_actuator.velocity_limit, joint_ids=joint_ids) + self._robot._data.default_joint_stiffness[:, joint_ids] = new_actuator.stiffness + self._robot._data.default_joint_damping[:, joint_ids] = new_actuator.damping + + self._swapped = True + carb.log_info( + f"[implicit_to_explicit_swap] Swapped '{self._actuator_name}' from " + f"{type(old_actuator).__name__} to {type(new_actuator).__name__} " + f"at step {env.common_step_counter}" ) + return {"actuator_swapped": True, 
"swap_step": env.common_step_counter} - # Apply randomization to motion stiffness gains - # Original gains from config - original_stiffness = torch.tensor(controller.cfg.motion_stiffness_task, device=env.device) - - # Create new stiffness values for each environment - new_stiffness = torch.zeros((len(env_ids), 6), device=env.device) - new_stiffness[:, 0:3] = original_stiffness[0:3] * stiff_xyz_multiplier.unsqueeze(-1) # xyz - new_stiffness[:, 3:6] = original_stiffness[3:6] * stiff_rpy_multiplier.unsqueeze(-1) # rpy - - # Update the controller's motion stiffness gains - controller._motion_p_gains_task[env_ids] = torch.diag_embed(new_stiffness) - # Apply selection matrix to zero out non-controlled axes - controller._motion_p_gains_task[env_ids] = ( - controller._selection_matrix_motion_task[env_ids] @ controller._motion_p_gains_task[env_ids] - ) - - # Apply randomization to motion damping gains - # Original damping ratios from config - original_damping = torch.tensor(controller.cfg.motion_damping_ratio_task, device=env.device) - - # Create new damping values for each environment - new_damping_ratios = torch.zeros((len(env_ids), 6), device=env.device) - new_damping_ratios[:, 0:3] = original_damping[0:3] * damp_xyz_multiplier.unsqueeze(-1) # xyz - new_damping_ratios[:, 3:6] = original_damping[3:6] * damp_rpy_multiplier.unsqueeze(-1) # rpy - - # Update the controller's motion damping gains - # Damping = 2 * sqrt(stiffness) * damping_ratio - controller._motion_d_gains_task[env_ids] = torch.diag_embed( - 2 * torch.diagonal(controller._motion_p_gains_task[env_ids], dim1=-2, dim2=-1).sqrt() * new_damping_ratios - ) + def __call__( + self, + env: ManagerBasedEnv, + env_ids, + asset_cfg: SceneEntityCfg, + actuator_name: str, + explicit_arm_cfg, + sysid_event_name: str, + swap_at_init: bool = False, + ) -> dict[str, object]: + if self._swapped: + return {"actuator_swapped": True} + + if self._swap_at_init: + return self._do_swap(env) + + self._resolve() + if 
self._sysid_term.scale_progress < 1.0: + return {"actuator_swapped": False, "scale_progress": self._sysid_term.scale_progress} + + return self._do_swap(env) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/observations.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/observations.py similarity index 66% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/observations.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/observations.py index 457f6300..43a64881 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/observations.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/observations.py @@ -4,14 +4,16 @@ # SPDX-License-Identifier: BSD-3-Clause import torch +import torch.nn.functional as F import isaaclab.utils.math as math_utils from isaaclab.assets import Articulation, RigidObject from isaaclab.envs import ManagerBasedEnv, ManagerBasedRLEnv from isaaclab.managers import ManagerTermBase, ObservationTermCfg, SceneEntityCfg +from isaaclab.sensors import Camera, RayCasterCamera, TiledCamera -from uwlab_tasks.manager_based.manipulation.reset_states.assembly_keypoints import Offset -from uwlab_tasks.manager_based.manipulation.reset_states.mdp import utils +from uwlab_tasks.manager_based.manipulation.omnireset.assembly_keypoints import Offset +from uwlab_tasks.manager_based.manipulation.omnireset.mdp import utils def target_asset_pose_in_root_asset_frame( @@ -199,3 +201,101 @@ def time_left(env) -> torch.Tensor: else: life_left = torch.zeros(env.num_envs, device=env.device, dtype=torch.float) return life_left.view(-1, 1) + + +def process_image( + env: ManagerBasedEnv, + sensor_cfg: SceneEntityCfg = SceneEntityCfg("tiled_camera"), + data_type: str = "rgb", + process_image: bool = True, + output_size: tuple = (224, 224), +) -> torch.Tensor: + """Images of a specific datatype 
from the camera sensor. + + If the flag :attr:`normalize` is True, post-processing of the images are performed based on their + data-types: + + - "rgb": Scales the image to (0, 1) and subtracts with the mean of the current image batch. + - "depth" or "distance_to_camera" or "distance_to_plane": Replaces infinity values with zero. + + Args: + env: The environment the cameras are placed within. + sensor_cfg: The desired sensor to read from. Defaults to SceneEntityCfg("tiled_camera"). + data_type: The data type to pull from the desired camera. Defaults to "rgb". + process_image: Whether to normalize the image. Defaults to True. + + Returns: + The images produced at the last time-step + """ + assert data_type == "rgb", "Only RGB images are supported for now." + # extract the used quantities (to enable type-hinting) + sensor: TiledCamera | Camera | RayCasterCamera = env.scene.sensors[sensor_cfg.name] + + # obtain the input image + images = sensor.data.output[data_type].clone() + + start_dims = torch.arange(len(images.shape) - 3).tolist() + s = start_dims[-1] if len(start_dims) > 0 else -1 + current_size = (images.shape[s + 1], images.shape[s + 2]) + + # Convert to float32 and normalize in-place + images = images.to(dtype=torch.float32) # Avoid redundant .float() and .type() calls + images.div_(255.0).clamp_(0.0, 1.0) # Normalize and clip in-place + images = images.permute(start_dims + [s + 3, s + 1, s + 2]) + + if current_size != output_size: + # Perform resize operation + images = F.interpolate(images, size=output_size, mode="bilinear", antialias=True) + + # rgb/depth image normalization + if not process_image: + # Reverse the permutation + reverse_dims = torch.argsort(torch.tensor(start_dims + [s + 3, s + 1, s + 2])) + images = images.permute(reverse_dims.tolist()) + # Convert back to uint8 in-place + images.mul_(255.0).clamp_(0, 255) # Scale and clamp in-place + images = images.to(dtype=torch.uint8) # Type conversion (not in-place) + + # import matplotlib.pyplot as 
plt + # img_0 = images[0].permute([1, 2, 0]) + # plt.imshow(img_0.cpu().numpy()) + # plt.savefig('saved_image_0.png', dpi=300, bbox_inches='tight') + # img_1 = images[1].permute([1, 2, 0]) + # plt.imshow(img_1.cpu().numpy()) + # plt.savefig('saved_image_1.png', dpi=300, bbox_inches='tight') + # img_2 = images[2].permute([1, 2, 0]) + # plt.imshow(img_2.cpu().numpy()) + # plt.savefig('saved_image_2.png', dpi=300, bbox_inches='tight') + # img_3 = images[3].permute([1, 2, 0]) + # plt.imshow(img_3.cpu().numpy()) + # plt.savefig('saved_image_3.png', dpi=300, bbox_inches='tight') + + return images + + +def binary_force_contact( + env: ManagerBasedEnv, + asset_cfg: SceneEntityCfg, + body_name: str = "wrist_3_link", + force_threshold: float = 25.0, +) -> torch.Tensor: + """Binary contact detection from force norm at a body. + + Reads body_incoming_joint_wrench_b, computes ||F|| from the force + components (first 3), and returns 1.0 if above threshold, else 0.0. + + Args: + env: The environment. + asset_cfg: Scene entity config for the robot articulation. + body_name: Name of the body to read wrench from. + force_threshold: Force norm threshold (N) for contact detection. + + Returns: + Tensor of shape (num_envs, 1): 1.0 if contact, 0.0 otherwise. 
+ """ + robot: Articulation = env.scene[asset_cfg.name] + body_idx = robot.body_names.index(body_name) + wrench_b = robot.data.body_incoming_joint_wrench_b[:, body_idx, :] # (N, 6) + force_norm = torch.norm(wrench_b[:, :3], dim=-1) # (N,) + contact = (force_norm > force_threshold).float() + return contact.unsqueeze(-1) # (N, 1) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/__init__.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/__init__.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/__init__.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/__init__.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders.py similarity index 90% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders.py index 62584ecc..0da795bb 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders.py @@ -76,3 +76,11 @@ def record_pre_reset(self, env_ids): } return "grasp_relative_pose", grasp_data + + +class PreStepDataCollectionObservationsRecorder(RecorderTerm): + """Recorder term that records data collection observations from the data_collection observation group.""" + + def record_pre_step(self): + """Record data collection observations from the data_collection observation group.""" + return "obs", self._env.obs_buf["data_collection"] diff --git 
a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders_cfg.py similarity index 71% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders_cfg.py index 80569e58..423201b5 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/recorders/recorders_cfg.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/recorders/recorders_cfg.py @@ -5,6 +5,11 @@ from dataclasses import MISSING +from isaaclab.envs.mdp.recorders.recorders_cfg import ( + InitialStateRecorderCfg, + PostStepStatesRecorderCfg, + PreStepActionsRecorderCfg, +) from isaaclab.managers.recorder_manager import RecorderManagerBaseCfg, RecorderTerm, RecorderTermCfg from isaaclab.utils import configclass @@ -63,3 +68,21 @@ def __init__(self, *args, **kwargs): object_name=object_name, gripper_body_name=gripper_body_name, ) + + +# Observation recorder. 
+@configclass +class PreStepDataCollectionObservationsRecorderCfg(RecorderTermCfg): + """Configuration for the data collection observations recorder term.""" + + class_type: type[RecorderTerm] = recorders.PreStepDataCollectionObservationsRecorder + + +@configclass +class ActionStateRecorderManagerCfg(RecorderManagerBaseCfg): + """Recorder manager that records raw actions and observations for data collection.""" + + record_initial_state = InitialStateRecorderCfg() + record_post_step_states = PostStepStatesRecorderCfg() + record_pre_step_actions = PreStepActionsRecorderCfg() + record_pre_step_data_collection_observations = PreStepDataCollectionObservationsRecorderCfg() diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/rewards.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/rewards.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/rewards.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/rewards.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/rigid_object_hasher.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/rigid_object_hasher.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/rigid_object_hasher.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/rigid_object_hasher.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/success_monitor.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/success_monitor.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/success_monitor.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/success_monitor.py diff --git 
a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/success_monitor_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/success_monitor_cfg.py similarity index 100% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/success_monitor_cfg.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/success_monitor_cfg.py diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/terminations.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/terminations.py similarity index 80% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/terminations.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/terminations.py index 1ee8ce22..7daf6855 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/terminations.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/terminations.py @@ -10,12 +10,13 @@ import isaacsim.core.utils.bounds as bounds_utils from isaaclab.assets import Articulation, RigidObject, RigidObjectCollection -from isaaclab.envs import ManagerBasedEnv +from isaaclab.envs import ManagerBasedEnv, ManagerBasedRLEnv from isaaclab.managers import ManagerTermBase, SceneEntityCfg, TerminationTermCfg from isaaclab.utils import math as math_utils -from uwlab_tasks.manager_based.manipulation.reset_states.mdp import utils +from uwlab_tasks.manager_based.manipulation.omnireset.mdp import utils +from ..assembly_keypoints import Offset from .collision_analyzer_cfg import CollisionAnalyzerCfg @@ -251,6 +252,34 @@ def __init__(self, cfg: TerminationTermCfg, env: ManagerBasedEnv): self.assets_to_check = self.object_assets + [self.robot_asset] self.ee_body_idx = self.robot_asset.data.body_names.index(self.ee_body_name) + # Optional assembly alignment filter + self.assembly_success_prob = 
cfg.params.get("assembly_success_prob") + if self.assembly_success_prob is not None: + insertive_asset_cfg = cfg.params.get("insertive_asset_cfg") + receptive_asset_cfg = cfg.params.get("receptive_asset_cfg") + self.insertive_asset = env.scene[insertive_asset_cfg.name] + self.receptive_asset = env.scene[receptive_asset_cfg.name] + + insertive_meta = utils.read_metadata_from_usd_directory(self.insertive_asset.cfg.spawn.usd_path) + receptive_meta = utils.read_metadata_from_usd_directory(self.receptive_asset.cfg.spawn.usd_path) + self.insertive_asset_offset = Offset( + pos=tuple(insertive_meta.get("assembled_offset").get("pos")), + quat=tuple(insertive_meta.get("assembled_offset").get("quat")), + ) + self.receptive_asset_offset = Offset( + pos=tuple(receptive_meta.get("assembled_offset").get("pos")), + quat=tuple(receptive_meta.get("assembled_offset").get("quat")), + ) + assembly_threshold_scale = cfg.params.get("assembly_threshold_scale", 1.0) + self.assembly_pos_threshold: float = ( + receptive_meta.get("success_thresholds").get("position") * assembly_threshold_scale + ) + self.assembly_ori_threshold: float = ( + receptive_meta.get("success_thresholds").get("orientation") * assembly_threshold_scale + ) + self.require_assembly_success = torch.rand(env.num_envs, device=env.device) < self.assembly_success_prob + self._pending_reflip = torch.zeros(env.num_envs, device=env.device, dtype=torch.bool) + def reset(self, env_ids: torch.Tensor | None = None) -> None: super().reset(env_ids) @@ -259,12 +288,30 @@ def reset(self, env_ids: torch.Tensor | None = None) -> None: asset_pos = asset.data.body_link_pos_w[:, self.ee_body_idx].clone() else: asset_pos = asset.data.root_pos_w.clone() - if not hasattr(asset, "initial_pos"): + if not hasattr(asset, "initial_pos") or env_ids is None: asset.initial_pos = asset_pos else: asset.initial_pos[env_ids] = asset_pos[env_ids].clone() - self.stability_counter[env_ids] = 0 + if env_ids is None: + self.stability_counter.zero_() + else: + 
self.stability_counter[env_ids] = 0 + + if self.assembly_success_prob is not None: + if env_ids is None: + self.require_assembly_success = ( + torch.rand(self._env.num_envs, device=self._env.device) < self.assembly_success_prob + ) + self._pending_reflip.zero_() + else: + reflip_mask = self._pending_reflip[env_ids] + if reflip_mask.any(): + reflip_ids = env_ids[reflip_mask] + self.require_assembly_success[reflip_ids] = ( + torch.rand(reflip_ids.shape[0], device=self._env.device) < self.assembly_success_prob + ) + self._pending_reflip[env_ids] = False def __call__( self, @@ -277,6 +324,10 @@ def __call__( max_object_pos_deviation: float = 0.1, pos_z_threshold: float = -0.01, consecutive_stability_steps: int = 5, + insertive_asset_cfg: SceneEntityCfg | None = None, + receptive_asset_cfg: SceneEntityCfg | None = None, + assembly_success_prob: float | None = None, + assembly_threshold_scale: float = 1.0, ) -> torch.Tensor: # Check time out @@ -359,6 +410,18 @@ def __call__( & time_out ) + if self.assembly_success_prob is not None: + ins_pos_w, ins_quat_w = self.insertive_asset_offset.apply(self.insertive_asset) + rec_pos_w, rec_quat_w = self.receptive_asset_offset.apply(self.receptive_asset) + rel_pos, rel_quat = math_utils.subtract_frame_transforms(rec_pos_w, rec_quat_w, ins_pos_w, ins_quat_w) + e_x, e_y, _ = math_utils.euler_xyz_from_quat(rel_quat) + euler_xy_dist = math_utils.wrap_to_pi(e_x).abs() + math_utils.wrap_to_pi(e_y).abs() + xyz_dist = torch.norm(rel_pos, dim=1) + assembly_success = (xyz_dist < self.assembly_pos_threshold) & (euler_xy_dist < self.assembly_ori_threshold) + assembly_match = torch.where(self.require_assembly_success, assembly_success, ~assembly_success) + reset_success = reset_success & assembly_match + self._pending_reflip |= reset_success + return reset_success @@ -673,3 +736,86 @@ def __call__( self._visualize_bounding_boxes(env) return ~obb_overlap + + +def consecutive_success_state(env: ManagerBasedRLEnv, num_consecutive_successes: int = 
10): + # Get the progress context to access assets and offsets + context_term = env.reward_manager.get_term_cfg("progress_context").func # type: ignore + continuous_success_counter = getattr(context_term, "continuous_success_counter") + + return continuous_success_counter >= num_consecutive_successes + + +def consecutive_success_state_with_min_length( + env: ManagerBasedRLEnv, num_consecutive_successes: int = 10, min_episode_length: int = 0 +): + """Like consecutive_success_state but rejects episodes shorter than min_episode_length. + + Episodes that start already assembled will reach num_consecutive_successes quickly, + but won't be marked as success until min_episode_length steps have passed. + Combined with a separate early termination, these episodes get terminated as failures. + """ + context_term = env.reward_manager.get_term_cfg("progress_context").func # type: ignore + continuous_success_counter = getattr(context_term, "continuous_success_counter") + success = continuous_success_counter >= num_consecutive_successes + if min_episode_length > 0: + success = success & (env.episode_length_buf >= min_episode_length) + return success + + +def early_success_termination(env: ManagerBasedRLEnv, num_consecutive_successes: int = 5, min_episode_length: int = 10): + """Terminates episodes that achieve success before min_episode_length steps. + + Paired with consecutive_success_state_with_min_length as the 'success' term: + that term gates success until min_episode_length, while this term terminates + the episode early (as a non-success failure) to avoid wasting sim time. 
+ """ + context_term = env.reward_manager.get_term_cfg("progress_context").func # type: ignore + continuous_success_counter = getattr(context_term, "continuous_success_counter") + is_successful = continuous_success_counter >= num_consecutive_successes + is_too_short = env.episode_length_buf < min_episode_length + return is_successful & is_too_short + + +def corrupted_camera_detected( + env: ManagerBasedRLEnv, camera_names: list[str], std_threshold: float = 10.0 +) -> torch.Tensor: + """ + Detect corrupted camera images by checking if standard deviation is below threshold. + + Corrupted cameras typically show uniform gray/black images with very low variance. + This function checks all specified cameras and returns True for environments where + any camera shows corruption (std < threshold). + + Args: + env: The environment instance. + camera_names: List of camera sensor names to check (e.g., ["front_camera", "wrist_camera"]). + std_threshold: Standard deviation threshold below which image is considered corrupted. + Default 10.0 is conservative - normal images have std > 20. + + Returns: + Boolean tensor of shape (num_envs,) indicating which environments have corrupted cameras. 
+ """ + num_envs = env.num_envs + device = env.device + + # Initialize as no corruption + is_corrupted = torch.zeros(num_envs, dtype=torch.bool, device=device) + + # Check each camera + for camera_name in camera_names: + # Get camera sensor from scene + camera = env.scene[camera_name] + + # Get RGB data: shape (num_envs, height, width, 3) + rgb_data = camera.data.output["rgb"] + + # Compute standard deviation across spatial and channel dimensions + # Reshape to (num_envs, -1) to compute std per environment + rgb_flat = rgb_data.reshape(num_envs, -1).float() + std_per_env = torch.std(rgb_flat, dim=1) + + # Mark as corrupted if std is below threshold + is_corrupted |= std_per_env < std_threshold + + return is_corrupted diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/utils.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/utils.py similarity index 59% rename from source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/utils.py rename to source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/utils.py index 83e784e3..86fe3f40 100644 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/utils.py +++ b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/omnireset/mdp/utils.py @@ -4,30 +4,34 @@ # SPDX-License-Identifier: BSD-3-Clause -import hashlib +import functools import io import logging import numpy as np import os import random +import shutil import tempfile import torch import trimesh import yaml from contextlib import contextmanager, redirect_stderr, redirect_stdout from functools import lru_cache +from pathlib import PurePosixPath from urllib.parse import urlparse import isaaclab.utils.math as math_utils import isaacsim.core.utils.torch as torch_utils import omni import warp as wp -from isaaclab.utils.assets import retrieve_file_path +from isaaclab.utils.assets import ISAAC_NUCLEUS_DIR, NVIDIA_NUCLEUS_DIR, retrieve_file_path from 
isaaclab.utils.warp import convert_to_warp_mesh from pxr import UsdGeom from pytorch3d.ops import sample_farthest_points, sample_points_from_meshes from pytorch3d.structures import Meshes +from uwlab_assets import UWLAB_CLOUD_ASSETS_DIR + from .rigid_object_hasher import RigidObjectHasher # ---- module-scope caches ---- @@ -328,32 +332,260 @@ def get_temp_dir(rank: int | None = None) -> str: return download_dir +def safe_retrieve_file_path(url: str, download_dir: str | None = None) -> str: + """Resolve a file path, downloading from the cloud if necessary. + + For HTTPS URLs and local paths the unified :func:`resolve_cloud_path` + handles download + persistent caching. Nucleus (``omniverse://``) + paths still fall back to Isaac Lab's :func:`retrieve_file_path`. + """ + from uwlab_assets import resolve_cloud_path + + if url.startswith(("http://", "https://")) or os.path.isfile(url): + return resolve_cloud_path(url) + + # Nucleus / omni.client fallback + if download_dir is None: + download_dir = get_temp_dir() + os.makedirs(download_dir, exist_ok=True) + tmp_dir = tempfile.mkdtemp(dir=download_dir, prefix=".dl_") + try: + downloaded = retrieve_file_path(url, download_dir=tmp_dir) + abs_tmp = os.path.abspath(tmp_dir) + if not os.path.abspath(downloaded).startswith(abs_tmp + os.sep): + return downloaded + target = os.path.join(download_dir, os.path.basename(downloaded)) + os.rename(downloaded, target) + return target + finally: + shutil.rmtree(tmp_dir, ignore_errors=True) + + +@functools.cache def read_metadata_from_usd_directory(usd_path: str) -> dict: - """Read metadata from metadata.yaml in the same directory as the USD file.""" - # Get the directory containing the USD file - usd_dir = os.path.dirname(usd_path) + """Read metadata from metadata.yaml in the same directory as the USD file. - # Look for metadata.yaml in the same directory + Results are memoised per *usd_path* so each asset's metadata is + downloaded and parsed at most once per process. 
+ """ + usd_dir = os.path.dirname(usd_path) metadata_path = os.path.join(usd_dir, "metadata.yaml") - download_dir = get_temp_dir() - with open(retrieve_file_path(metadata_path, download_dir=download_dir)) as f: + local_path = safe_retrieve_file_path(metadata_path, download_dir=get_temp_dir()) + with open(local_path) as f: metadata_file = yaml.safe_load(f) return metadata_file -def compute_assembly_hash(*usd_paths: str) -> str: - """Compute a hash for an assembly based on the USD file paths. +def object_name_from_usd(usd_path: str) -> str: + """Extract the canonical object name from a USD asset path. + + Uses the parent directory name, which is unique across the asset tree. + Works identically for local paths and S3 URLs. + + Example: ``'.../Props/Custom/Peg/peg.usd'`` -> ``'Peg'`` + """ + return PurePosixPath(urlparse(usd_path).path).parent.name + + +def compute_pair_dir(*usd_paths: str) -> str: + """Derive a human-readable directory name from one or more USD asset paths. + + Names are sorted alphabetically and joined with ``'__'``. + + Examples: + Single object: ``('...Peg/peg.usd',)`` -> ``'Peg'`` + Object pair: ``('...Peg/peg.usd', '...PegHole/peg_hole.usd')`` -> ``'Peg__PegHole'`` + """ + return "__".join(sorted(object_name_from_usd(p) for p in usd_paths)) + + +def load_asset_paths_from_config( + config_path: str, + cache_subdir: str = "", + skip_validation: bool = True, +) -> list[str]: + """Load asset paths from YAML config file. Args: - *usd_paths: Variable number of USD file paths + config_path: Path to the YAML config file. + cache_subdir: Subdirectory name for local caching of cloud assets (e.g., "hdris", "textures"). + skip_validation: If True, skip expensive omni.client.stat() validation for Nucleus paths. Returns: - A hash string that uniquely identifies the combination of objects + List of asset paths ready to use. 
""" - # Extract path suffixes and sort to ensure consistent hash regardless of input order - sorted_paths = sorted(urlparse(path).path for path in usd_paths) - combined = "|".join(sorted_paths) + with open(config_path) as f: + config = yaml.safe_load(f) + + def collect_paths(obj): + paths = [] + if isinstance(obj, dict): + for value in obj.values(): + paths.extend(collect_paths(value)) + elif isinstance(obj, list): + for item in obj: + if isinstance(item, str): + paths.append(item) + else: + paths.extend(collect_paths(item)) + return paths + + local_and_nucleus_paths = [] + cloud_paths = [] + for section, paths_obj in config.items(): + section_paths = collect_paths(paths_obj) + for p in section_paths: + if section == "isaac_nucleus": + local_and_nucleus_paths.append(f"{ISAAC_NUCLEUS_DIR}/{p}") + elif section == "local": + local_and_nucleus_paths.append(p) + elif section == "cloud": + if p.startswith("http://") or p.startswith("https://"): + cloud_paths.append(p) + else: + cloud_paths.append(f"{UWLAB_CLOUD_ASSETS_DIR}/{p}") + else: + local_and_nucleus_paths.append(f"{NVIDIA_NUCLEUS_DIR}/{p}") + + # Download cloud assets to local cache + cached_cloud_paths = [] + if cloud_paths: + cached_cloud_paths = _download_cloud_assets(cloud_paths, cache_subdir) + + # Build final path list + valid_paths = list(cached_cloud_paths) + if skip_validation: + valid_paths.extend(local_and_nucleus_paths) + else: + skipped_nucleus = [] + for path in local_and_nucleus_paths: + if path.startswith("/"): + if os.path.exists(path): + valid_paths.append(path) + else: + skipped_nucleus.append(path) + if skipped_nucleus: + logging.warning( + f"[load_asset_paths_from_config] Skipped {len(skipped_nucleus)} Nucleus paths " + f"(non-local, unreliable). Using {len(valid_paths)} local paths only." 
+ ) + + # Validate that we actually have usable paths + if not valid_paths: + raise RuntimeError( + f"[load_asset_paths_from_config] No valid asset paths loaded from {config_path}.\n" + f" Nucleus paths found: {len(local_and_nucleus_paths)}\n" + f" Cloud paths found: {len(cloud_paths)}\n" + f" Cloud paths cached locally: {len(cached_cloud_paths)}\n" + " Check that your Nucleus server is running or cloud assets are downloadable." + ) + + # Validate local paths are accessible (local and cached cloud only; Nucleus paths + # are validated lazily by the renderer since omni.client.stat is expensive) + inaccessible = [] + for p in valid_paths: + if p.startswith("/") and not os.path.exists(p): + inaccessible.append(p) + if inaccessible: + logging.warning( + f"[load_asset_paths_from_config] {len(inaccessible)}/{len(valid_paths)} local paths are inaccessible. " + f"First 3: {inaccessible[:3]}" + ) + valid_paths = [p for p in valid_paths if p not in inaccessible] + + total_local = sum(1 for p in valid_paths if p.startswith("/")) + total_nucleus = sum(1 for p in valid_paths if p.startswith("omniverse://")) + logging.info( + f"[load_asset_paths_from_config] Loaded {len(valid_paths)} paths from {config_path} " + f"({total_local} local, {total_nucleus} nucleus)" + ) - full_hash = hashlib.md5(combined.encode()).hexdigest() - return full_hash + return valid_paths + + +def _download_cloud_assets(cloud_urls: list[str], cache_subdir: str = "", num_workers: int = 8) -> list[str]: + """Download cloud URLs to local cache, return local paths. + + Delegates to :func:`resolve_cloud_path` for each URL so all cloud + assets share the same persistent cache and atomic-download logic. + Downloads are parallelized with *num_workers* threads and a live + progress line with elapsed time is printed. 
+ """ + import time + from concurrent.futures import ThreadPoolExecutor, as_completed + + from uwlab_assets import resolve_cloud_path + + n = len(cloud_urls) + to_download = [u for u in cloud_urls if not os.path.isfile(_cached_local_path(u))] + needs_download = len(to_download) + + if needs_download == 0: + return [resolve_cloud_path(url) for url in cloud_urls] + + tag = cache_subdir or "cloud" + print(f"[INFO] Downloading {needs_download}/{n} {tag} assets ({num_workers} workers) ...") + t0 = time.monotonic() + + downloaded = 0 + futures = {} + with ThreadPoolExecutor(max_workers=num_workers) as pool: + for url in to_download: + futures[pool.submit(resolve_cloud_path, url)] = url + for future in as_completed(futures): + future.result() + downloaded += 1 + elapsed = time.monotonic() - t0 + rate = downloaded / elapsed + eta = (needs_download - downloaded) / rate if rate > 0 else 0 + print( + f"\r [{downloaded}/{needs_download}] {elapsed:.0f}s elapsed, ~{eta:.0f}s remaining", + end="", + flush=True, + ) + + elapsed = time.monotonic() - t0 + print(f"\n[INFO] Finished downloading {needs_download} {tag} assets in {elapsed:.0f}s.") + + return [resolve_cloud_path(url) for url in cloud_urls] + + +def _cached_local_path(url: str) -> str: + """Return the expected local cache path for a cloud URL without downloading.""" + from uwlab_assets import _extract_relative_path + + rel = _extract_relative_path(url) + return os.path.join(os.path.expanduser("~"), ".cache", "uwlab", "assets", rel) + + +# ---- OSC / script helpers (unscaled action = Cartesian delta) ---- +def target_pose_to_action( + ee_pos_b: torch.Tensor, + ee_quat_b: torch.Tensor, + target_pos: torch.Tensor, + target_quat: torch.Tensor, +) -> torch.Tensor: + """Compute arm action (6-DOF delta) so RelCartesianOSC tracks target pose. + + For Sysid env (unscaled action): action = delta_pose. Used by sysid/plot + scripts that step the env with waypoint targets. 
+ """ + delta_pos = target_pos - ee_pos_b + quat_err = math_utils.quat_mul(target_quat, math_utils.quat_inv(ee_quat_b)) + axis_angle = math_utils.axis_angle_from_quat(quat_err) + return torch.cat([delta_pos, axis_angle], dim=-1) + + +def settle_robot( + robot, sim, default_joint_pos, default_joint_vel, arm_joint_ids, sim_dt, headless=True, settle_steps=10 +): + """Hard-reset settle: write desired state repeatedly, then final write without stepping.""" + for _ in range(settle_steps): + robot.write_joint_state_to_sim(default_joint_pos, default_joint_vel) + robot.write_data_to_sim() + sim.step(render=not headless) + robot.update(sim_dt) + robot.write_joint_state_to_sim(default_joint_pos, default_joint_vel) + robot.write_data_to_sim() + robot.update(sim_dt) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/actions.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/actions.py deleted file mode 100644 index 582c5f26..00000000 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/actions.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). -# All Rights Reserved. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from isaaclab.controllers import OperationalSpaceControllerCfg -from isaaclab.utils import configclass - -from uwlab_assets.robots.ur5e_robotiq_gripper import EXPLICIT_UR5E_ROBOTIQ_2F85 -from uwlab_assets.robots.ur5e_robotiq_gripper.actions import ROBOTIQ_COMPLIANT_JOINTS, ROBOTIQ_GRIPPER_BINARY_ACTIONS - -from uwlab_tasks.manager_based.manipulation.reset_states.mdp.utils import read_metadata_from_usd_directory - -from ...mdp.actions.actions_cfg import TransformedOperationalSpaceControllerActionCfg - -UR5E_ROBOTIQ_2F85_RELATIVE_OSC = TransformedOperationalSpaceControllerActionCfg( - asset_name="robot", - joint_names=["shoulder.*", "elbow.*", "wrist.*"], - body_name="robotiq_base_link", - body_offset=TransformedOperationalSpaceControllerActionCfg.OffsetCfg( - pos=(0.1345, 0.0, 0.0), rot=(1.0, 0.0, 0.0, 0.0) - ), - action_root_offset=TransformedOperationalSpaceControllerActionCfg.OffsetCfg( - pos=read_metadata_from_usd_directory(EXPLICIT_UR5E_ROBOTIQ_2F85.spawn.usd_path).get("offset").get("pos"), - rot=read_metadata_from_usd_directory(EXPLICIT_UR5E_ROBOTIQ_2F85.spawn.usd_path).get("offset").get("quat"), - ), - scale_xyz_axisangle=(0.02, 0.02, 0.02, 0.02, 0.02, 0.2), - controller_cfg=OperationalSpaceControllerCfg( - target_types=["pose_rel"], - impedance_mode="fixed", - inertial_dynamics_decoupling=False, - partial_inertial_dynamics_decoupling=False, - gravity_compensation=False, - motion_stiffness_task=(200.0, 200.0, 200.0, 3.0, 3.0, 3.0), - motion_damping_ratio_task=(3.0, 3.0, 3.0, 1.0, 1.0, 1.0), - nullspace_control="none", - ), - position_scale=1.0, - orientation_scale=1.0, - stiffness_scale=1.0, - damping_ratio_scale=1.0, -) - - -@configclass -class Ur5eRobotiq2f85RelativeOSCAction: - arm = UR5E_ROBOTIQ_2F85_RELATIVE_OSC - gripper = ROBOTIQ_GRIPPER_BINARY_ACTIONS - compliant_joints = ROBOTIQ_COMPLIANT_JOINTS diff --git 
a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/agents/rsl_rl_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/agents/rsl_rl_cfg.py deleted file mode 100644 index 575e44dd..00000000 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/config/ur5e_robotiq_2f85/agents/rsl_rl_cfg.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). -# All Rights Reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from isaaclab.utils import configclass -from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlPpoAlgorithmCfg - -from uwlab_rl.rsl_rl.rl_cfg import RslRlFancyActorCriticCfg - - -def my_experts_observation_func(env): - obs = env.unwrapped.obs_buf["expert_obs"] - return obs - - -@configclass -class Base_PPORunnerCfg(RslRlOnPolicyRunnerCfg): - num_steps_per_env = 32 - max_iterations = 40000 - save_interval = 100 - resume = False - experiment_name = "ur5e_robotiq_2f85_reset_states_agent" - policy = RslRlFancyActorCriticCfg( - init_noise_std=1.0, - actor_obs_normalization=True, - critic_obs_normalization=True, - actor_hidden_dims=[512, 256, 128, 64], - critic_hidden_dims=[512, 256, 128, 64], - activation="elu", - noise_std_type="gsde", - state_dependent_std=False, - ) - algorithm = RslRlPpoAlgorithmCfg( - value_loss_coef=1.0, - use_clipped_value_loss=True, - normalize_advantage_per_mini_batch=False, - clip_param=0.2, - entropy_coef=0.006, - num_learning_epochs=5, - num_mini_batches=4, - learning_rate=1.0e-4, - schedule="adaptive", - gamma=0.99, - lam=0.95, - desired_kl=0.01, - max_grad_norm=1.0, - ) diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/actions_cfg.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/actions_cfg.py deleted file mode 100644 index 
755dcf4c..00000000 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/actions_cfg.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (c) 2024-2026, The UW Lab Project Developers. (https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). -# All Rights Reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -from dataclasses import MISSING - -from isaaclab.envs.mdp.actions.actions_cfg import OperationalSpaceControllerActionCfg -from isaaclab.managers.action_manager import ActionTerm -from isaaclab.utils import configclass - -from . import task_space_actions - - -@configclass -class TransformedOperationalSpaceControllerActionCfg(OperationalSpaceControllerActionCfg): - """Configuration for Scaled Operational Space Controller action term. - - This action term uses the OperationalSpaceController directly and applies fixed scaling - to the input actions. The scaling values are applied per DOF (x, y, z, rx, ry, rz). - """ - - class_type: type[ActionTerm] = task_space_actions.TransformedOperationalSpaceControllerAction - - action_root_offset: OperationalSpaceControllerActionCfg.OffsetCfg | None = None - """Offset for the action root frame.""" - - scale_xyz_axisangle: tuple[float, float, float, float, float, float] = MISSING - """Fixed scaling values for [x, y, z, rx, ry, rz] where rotation is in axis-angle representation.""" - - input_clip: tuple[float, float] | None = None - """Input clip values for the action.""" diff --git a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/task_space_actions.py b/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/task_space_actions.py deleted file mode 100644 index 03c6ddc8..00000000 --- a/source/uwlab_tasks/uwlab_tasks/manager_based/manipulation/reset_states/mdp/actions/task_space_actions.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) 2024-2026, The UW Lab Project Developers. 
(https://github.com/uw-lab/UWLab/blob/main/CONTRIBUTORS.md). -# All Rights Reserved. -# -# SPDX-License-Identifier: BSD-3-Clause - -from __future__ import annotations - -import torch -from typing import TYPE_CHECKING - -import isaaclab.utils.math as math_utils -from isaaclab.envs.mdp.actions.task_space_actions import OperationalSpaceControllerAction - -from . import actions_cfg - -if TYPE_CHECKING: - from isaaclab.envs import ManagerBasedEnv - - -class TransformedOperationalSpaceControllerAction(OperationalSpaceControllerAction): - """Scaled Operational Space Controller action term. - - This action term inherits from OperationalSpaceControllerAction and applies fixed scaling - to the input actions. The scaling values are applied per DOF (x, y, z, rx, ry, rz) where - rotation is in axis-angle representation. - - The workflow is: - 1. Receive 6-DOF Cartesian commands [x, y, z, rx, ry, rz] (rotation in axis-angle) - 2. Apply fixed scaling per DOF - 3. Use parent OperationalSpaceControllerAction to handle the rest - """ - - cfg: actions_cfg.TransformedOperationalSpaceControllerActionCfg - """The configuration of the action term.""" - - def __init__(self, cfg: actions_cfg.TransformedOperationalSpaceControllerActionCfg, env: ManagerBasedEnv): - # Initialize the parent OSC action - super().__init__(cfg, env) - - self._scale = torch.tensor(cfg.scale_xyz_axisangle, device=self.device) - if cfg.input_clip is not None: - self._input_clip = torch.tensor(cfg.input_clip, device=self.device) - else: - self._input_clip = None - if self.cfg.action_root_offset is not None: - self._action_root_offset_pos = torch.tensor(cfg.action_root_offset.pos, device=self.device).repeat( - self.num_envs, 1 - ) - self._action_root_offset_quat = torch.tensor(cfg.action_root_offset.rot, device=self.device).repeat( - self.num_envs, 1 - ) - else: - self._action_root_offset_pos = None - self._action_root_offset_quat = None - - self._transformed_actions = torch.zeros_like(self.raw_actions) - - def 
process_actions(self, actions: torch.Tensor): - """Process actions by applying fixed scaling per DOF and coordinate frame transformation, then call parent method.""" - # Step 1: Apply scaling - scaled_actions_offset_coords = actions * self._scale - if self._input_clip is not None: - scaled_actions_offset_coords = torch.clamp( - scaled_actions_offset_coords, min=self._input_clip[0], max=self._input_clip[1] - ) - - self._transformed_actions[:] = scaled_actions_offset_coords - - if self._action_root_offset_pos is not None and self._action_root_offset_quat is not None: - # Step 2: Transform coordinate frame from offset-robot-base to standard-robot-base - # Extract position and rotation deltas - delta_pos_offset = scaled_actions_offset_coords[:, :3] # [x, y, z] - delta_rot_offset = scaled_actions_offset_coords[:, 3:6] # [rx, ry, rz] in axis-angle - - # Get rotation matrix from offset-robot-base to standard-robot-base - # The action_root_offset defines standard -> offset, so we need the inverse - R_offset_to_standard = math_utils.matrix_from_quat(math_utils.quat_inv(self._action_root_offset_quat)) - - # Transform position delta: rotate from offset coordinates to standard coordinates - delta_pos_standard = torch.bmm(R_offset_to_standard, delta_pos_offset.unsqueeze(-1)).squeeze(-1) - - # Transform rotation delta (axis-angle): rotate the axis from offset coordinates to standard coordinates - delta_rot_standard = torch.bmm(R_offset_to_standard, delta_rot_offset.unsqueeze(-1)).squeeze(-1) - - # Combine back into 6-DOF command - scaled_actions_standard_coords = torch.cat([delta_pos_standard, delta_rot_standard], dim=-1) - else: - scaled_actions_standard_coords = scaled_actions_offset_coords - - # Call parent process_actions with transformed actions - super().process_actions(scaled_actions_standard_coords) - - @property - def transformed_actions(self) -> torch.Tensor: - """Processed actions for operational space control.""" - return self._transformed_actions diff --git 
a/uwlab.sh b/uwlab.sh index 94160e7b..4bfa38ae 100755 --- a/uwlab.sh +++ b/uwlab.sh @@ -574,13 +574,6 @@ while [[ $# -gt 0 ]]; do # LD_PRELOAD is restored below, after installation begin_arm_install_sandbox - # --- ensure fork of rsl-rl-lib is used --- - echo "[INFO] Forcing reinstall of rsl-rl-lib from zoctipus/rsl_rl.git@master..." - ${pip_uninstall_command} rsl-rl-lib || true - ${pip_command} --no-cache-dir --force-reinstall --no-deps \ - "rsl-rl-lib @ git+https://github.com/zoctipus/rsl_rl.git@master" - echo "[INFO] Verified: rsl-rl-lib reinstalled from UWLab fork." - # install pytorch (version based on arch) ensure_cuda_torch # recursively look into directories and install them