37 changes: 26 additions & 11 deletions .github/unittest/linux_libs/scripts_isaaclab/isaac.sh
@@ -20,6 +20,8 @@ export TF_CPP_MIN_LOG_LEVEL=0
export BATCHED_PIPE_TIMEOUT=60
export TD_GET_DEFAULTS_TO_NONE=1
export OMNI_KIT_ACCEPT_EULA=yes
export PIP_DISABLE_PIP_VERSION_CHECK=1
export PYTHONNOUSERSITE=1

nvidia-smi

@@ -46,36 +48,49 @@ eval "$(${conda_dir}/bin/conda shell.bash hook)"
conda create --prefix ${env_dir} python=3.10 -y
conda activate ${env_dir}

# Set LD_LIBRARY_PATH to prioritize conda environment libraries
export LD_LIBRARY_PATH=${lib_dir}:${LD_LIBRARY_PATH:-}

# Install a compatible version of expat (< 2.6.0 to avoid XML_SetReparseDeferralEnabled symbol issues)
conda install -c conda-forge "expat<2.6" -y

# Reinstall Python to ensure it's properly linked against the conda expat
conda install --force-reinstall python=3.10 -y

# Verify the expat linkage
echo "* Checking pyexpat linkage:"
python -c "import pyexpat; print('pyexpat imported successfully')" || echo "WARNING: pyexpat import failed"

# Pin pytorch to 2.5.1 for IsaacLab
conda install pytorch==2.5.1 torchvision==0.20.1 pytorch-cuda=12.4 -c pytorch -c nvidia -y

conda run -p ${env_dir} pip install --upgrade pip
conda run -p ${env_dir} pip install 'isaacsim[all,extscache]==4.5.0' --extra-index-url https://pypi.nvidia.com
python -m pip install --upgrade pip --disable-pip-version-check
python -m pip install 'isaacsim[all,extscache]==4.5.0' --extra-index-url https://pypi.nvidia.com --disable-pip-version-check
conda install conda-forge::"cmake>3.22" -y

git clone https://github.com/isaac-sim/IsaacLab.git
cd IsaacLab
conda run -p ${env_dir} ./isaaclab.sh --install sb3
./isaaclab.sh --install sb3
cd ../

# install tensordict
if [[ "$RELEASE" == 0 ]]; then
conda install "anaconda::cmake>=3.22" -y
conda run -p ${env_dir} python -m pip install "pybind11[global]"
conda run -p ${env_dir} python -m pip install git+https://github.com/pytorch/tensordict.git
python -m pip install "pybind11[global]" --disable-pip-version-check
python -m pip install git+https://github.com/pytorch/tensordict.git --disable-pip-version-check
else
conda run -p ${env_dir} python -m pip install tensordict
python -m pip install tensordict --disable-pip-version-check
fi

# smoke test
conda run -p ${env_dir} python -c "import tensordict"
python -c "import tensordict"

printf "* Installing torchrl\n"
conda run -p ${env_dir} python -m pip install -e . --no-build-isolation
conda run -p ${env_dir} python -c "import torchrl"
python -m pip install -e . --no-build-isolation --disable-pip-version-check
python -c "import torchrl"

# Install pytest
conda run -p ${env_dir} python -m pip install pytest pytest-cov pytest-mock pytest-instafail pytest-rerunfailures pytest-error-for-skips pytest-asyncio
python -m pip install pytest pytest-cov pytest-mock pytest-instafail pytest-rerunfailures pytest-error-for-skips pytest-asyncio --disable-pip-version-check

# Run tests
conda run -p ${env_dir} python -m pytest test/test_libs.py -k isaac -s
python -m pytest test/test_libs.py -k isaac -s
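
The expat pin above exists to keep pyexpat importable: XML_SetReparseDeferralEnabled was added in expat 2.6.0, and a Python build whose pyexpat expects a different libexpat than the one resolved at runtime fails at import time with an undefined-symbol error. Below is a minimal sketch (not part of the PR) of a slightly more verbose linkage check than the one-liner in isaac.sh, assuming it runs inside the activated conda environment.

# Hedged sketch (illustrative, not part of the PR): a more verbose version of
# the pyexpat linkage check performed in isaac.sh.
import sys

try:
    import pyexpat
except ImportError as exc:
    # A typical failure mode is "undefined symbol: XML_SetReparseDeferralEnabled"
    # when the Python build and the runtime libexpat disagree on the version.
    sys.exit(f"WARNING: pyexpat import failed: {exc}")

major, minor, micro = pyexpat.version_info  # version of the linked expat
print(f"pyexpat OK, linked against expat {major}.{minor}.{micro}")
if (major, minor) >= (2, 6):
    print("NOTE: expat >= 2.6 detected, while the script pins expat<2.6")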
2 changes: 1 addition & 1 deletion .github/unittest/linux_libs/scripts_minari/environment.yml
@@ -25,6 +25,6 @@ dependencies:
- gymnasium-robotics
- minari[create]
- jax>=0.7.0
- mujoco<3.3.6
- mujoco>=2.3.0,<3.0.0
- mujoco-py<2.2,>=2.1
- minigrid
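
The tightened mujoco pin keeps the resolved version in the 2.x range expected alongside the existing mujoco-py pin. A hedged sketch of how such pins can be checked against the installed packages with importlib.metadata and packaging; the specifier strings are copied from environment.yml, everything else is illustrative and not part of the PR.

# Hedged sketch (illustrative): verify installed versions against the pins
# declared in environment.yml.
from importlib.metadata import PackageNotFoundError, version

from packaging.specifiers import SpecifierSet
from packaging.version import Version

PINS = {
    "mujoco": SpecifierSet(">=2.3.0,<3.0.0"),
    "mujoco-py": SpecifierSet(">=2.1,<2.2"),
}

for name, spec in PINS.items():
    try:
        installed = Version(version(name))
    except PackageNotFoundError:
        print(f"{name}: not installed")
        continue
    status = "OK" if installed in spec else f"violates {spec}"
    print(f"{name} {installed}: {status}")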
72 changes: 36 additions & 36 deletions .github/workflows/test-linux-libs.yml
@@ -91,40 +91,40 @@ jobs:

bash .github/unittest/linux_libs/scripts_brax/run_all.sh

unittests-d4rl:
strategy:
matrix:
python_version: ["3.10"]
cuda_arch_version: ["12.8"]
if: ${{ github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'Data') }}
uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
with:
repository: pytorch/rl
runner: "linux.g5.4xlarge.nvidia.gpu"
docker-image: "nvidia/cuda:12.4.0-devel-ubuntu22.04"
timeout: 120
script: |
if [[ "${{ github.ref }}" =~ release/* ]]; then
export RELEASE=1
export TORCH_VERSION=stable
else
export RELEASE=0
export TORCH_VERSION=nightly
fi
# unittests-d4rl:
# strategy:
# matrix:
# python_version: ["3.10"]
# cuda_arch_version: ["12.8"]
# if: ${{ github.event_name == 'push' || contains(github.event.pull_request.labels.*.name, 'Data') }}
# uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
# with:
# repository: pytorch/rl
# runner: "linux.g5.4xlarge.nvidia.gpu"
# docker-image: "nvidia/cuda:12.4.0-devel-ubuntu22.04"
# timeout: 120
# script: |
# if [[ "${{ github.ref }}" =~ release/* ]]; then
# export RELEASE=1
# export TORCH_VERSION=stable
# else
# export RELEASE=0
# export TORCH_VERSION=nightly
# fi

set -euo pipefail
export PYTHON_VERSION="3.10"
export CU_VERSION="cu128"
export TAR_OPTIONS="--no-same-owner"
export UPLOAD_CHANNEL="nightly"
export TF_CPP_MIN_LOG_LEVEL=0
export BATCHED_PIPE_TIMEOUT=60
export TD_GET_DEFAULTS_TO_NONE=1
# set -euo pipefail
# export PYTHON_VERSION="3.10"
# export CU_VERSION="cu128"
# export TAR_OPTIONS="--no-same-owner"
# export UPLOAD_CHANNEL="nightly"
# export TF_CPP_MIN_LOG_LEVEL=0
# export BATCHED_PIPE_TIMEOUT=60
# export TD_GET_DEFAULTS_TO_NONE=1

bash .github/unittest/linux_libs/scripts_d4rl/setup_env.sh
bash .github/unittest/linux_libs/scripts_d4rl/install.sh
bash .github/unittest/linux_libs/scripts_d4rl/run_test.sh
bash .github/unittest/linux_libs/scripts_d4rl/post_process.sh
# bash .github/unittest/linux_libs/scripts_d4rl/setup_env.sh
# bash .github/unittest/linux_libs/scripts_d4rl/install.sh
# bash .github/unittest/linux_libs/scripts_d4rl/run_test.sh
# bash .github/unittest/linux_libs/scripts_d4rl/post_process.sh

unittests-envpool:
strategy:
@@ -156,10 +156,10 @@ jobs:
export BATCHED_PIPE_TIMEOUT=60
export TD_GET_DEFAULTS_TO_NONE=1

bash .github/unittest/linux_libs/scripts_d4rl/setup_env.sh
bash .github/unittest/linux_libs/scripts_d4rl/install.sh
bash .github/unittest/linux_libs/scripts_d4rl/run_test.sh
bash .github/unittest/linux_libs/scripts_d4rl/post_process.sh
bash .github/unittest/linux_libs/scripts_envpool/setup_env.sh
bash .github/unittest/linux_libs/scripts_envpool/install.sh
bash .github/unittest/linux_libs/scripts_envpool/run_test.sh
bash .github/unittest/linux_libs/scripts_envpool/post_process.sh

unittests-gendgrl:
strategy:
21 changes: 19 additions & 2 deletions test/test_libs.py
@@ -73,6 +73,8 @@
from torchrl.data.datasets.roboset import RobosetExperienceReplay
from torchrl.data.datasets.vd4rl import VD4RLExperienceReplay
from torchrl.data.replay_buffers import SamplerWithoutReplacement
from torchrl.data.replay_buffers.samplers import SliceSampler
from torchrl.data.replay_buffers.storages import LazyTensorStorage
from torchrl.data.utils import CloudpickleWrapper
from torchrl.envs import (
CatTensors,
@@ -82,6 +84,7 @@
EnvCreator,
RemoveEmptySpecs,
RenameTransform,
StepCounter,
)
from torchrl.envs.batched_envs import SerialEnv
from torchrl.envs.libs.brax import _has_brax, BraxEnv, BraxWrapper
@@ -2790,7 +2793,6 @@ class TestVmas:
@pytest.mark.parametrize("scenario_name", VmasWrapper.available_envs)
@pytest.mark.parametrize("continuous_actions", [True, False])
def test_all_vmas_scenarios(self, scenario_name, continuous_actions):

env = VmasEnv(
scenario=scenario_name,
continuous_actions=continuous_actions,
@@ -3455,12 +3457,16 @@ def test_d4rl_dummy(self, task):
@pytest.mark.parametrize("split_trajs", [True, False])
@pytest.mark.parametrize("from_env", [True, False])
def test_dataset_build(self, task, split_trajs, from_env):
import d4rl # noqa: F401

t0 = time.time()
data = D4RLExperienceReplay(
task, split_trajs=split_trajs, from_env=from_env, batch_size=2
)
sample = data.sample()
env = GymWrapper(gym.make(task))
# D4RL environments are registered with gym, not gymnasium
with set_gym_backend("gym"):
env = GymWrapper(gym.make(task))
rollout = env.rollout(2)
for key in rollout.keys(True, True):
if "truncated" in key:
@@ -5144,6 +5150,17 @@ def test_isaaclab(self, env):
env.check_env_specs(break_when_any_done="both")
torchrl_logger.info("Check succeeded!")

def test_isaaclab_rb(self, env):
env = env.append_transform(StepCounter())
rb = ReplayBuffer(
storage=LazyTensorStorage(50, ndim=2), sampler=SliceSampler(num_slices=5)
)
rb.extend(env.rollout(20))
# check that rb["step_count"].flatten() is made of sequences of 4 consecutive numbers
flat_ranges = rb["step_count"].flatten() % 4
arange = torch.arange(flat_ranges.numel(), device=flat_ranges.device) % 4
assert (flat_ranges == arange).all()

def test_isaac_collector(self, env):
col = SyncDataCollector(
env, env.rand_action, frames_per_batch=1000, total_frames=100_000_000
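
The new test_isaaclab_rb exercises a pattern worth spelling out: a batched rollout with leading dims [num_envs, time] is written into a LazyTensorStorage declared with ndim=2, and a SliceSampler then samples contiguous sub-trajectories within each environment; StepCounter supplies the monotonically increasing step_count used for the consecutiveness check. Below is a self-contained sketch of the same pattern with synthetic data instead of an IsaacLab env; shapes and the storage capacity are illustrative, not the test's values.

# Hedged sketch (synthetic data, not the IsaacLab env used in the test) of the
# LazyTensorStorage(ndim=2) + SliceSampler pattern exercised by test_isaaclab_rb.
import torch
from tensordict import TensorDict
from torchrl.data import LazyTensorStorage, ReplayBuffer, SliceSampler

num_envs, steps = 4, 20
# Fake batched rollout: leading dims are [env, time], as a batched env produces.
data = TensorDict(
    {
        "observation": torch.randn(num_envs, steps, 3),
        "step_count": torch.arange(steps).expand(num_envs, steps).unsqueeze(-1),
    },
    batch_size=[num_envs, steps],
)

# ndim=2 tells the storage that the first two dims index (env, time), so the
# SliceSampler can draw contiguous sub-trajectories within each env.
rb = ReplayBuffer(
    storage=LazyTensorStorage(100, ndim=2),  # sized to hold 4 x 20 transitions
    sampler=SliceSampler(num_slices=5),
)
rb.extend(data)

sample = rb.sample(20)  # 5 slices of 4 consecutive steps each
print(sample["step_count"].squeeze(-1))  # runs of consecutive step counts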
18 changes: 12 additions & 6 deletions torchrl/data/datasets/d4rl.py
@@ -267,16 +267,19 @@ def _get_dataset_direct_download(self, name, env_kwargs):
return dataset

def _get_dataset_direct(self, name, env_kwargs):
from torchrl.envs.libs.gym import GymWrapper
from torchrl.envs.libs.gym import GymWrapper, set_gym_backend

type(self)._import_d4rl()

if not self._has_d4rl:
raise ImportError("Could not import d4rl") from self.D4RL_ERR
import d4rl
import gym

env = GymWrapper(gym.make(name))
# D4RL environments are registered with gym, not gymnasium
# so we need to ensure we're using the gym backend
with set_gym_backend("gym"):
import gym
env = GymWrapper(gym.make(name))
with tempfile.TemporaryDirectory() as tmpdir:
os.environ["D4RL_DATASET_DIR"] = tmpdir
dataset = d4rl.qlearning_dataset(env._env, **env_kwargs)
@@ -346,12 +349,15 @@ def _get_dataset_from_env(self, name, env_kwargs):
"""
if env_kwargs:
raise RuntimeError("env_kwargs cannot be passed with using from_env=True")
import gym
import d4rl # noqa: F401

# we do a local import to avoid circular import issues
from torchrl.envs.libs.gym import GymWrapper
from torchrl.envs.libs.gym import GymWrapper, set_gym_backend

with tempfile.TemporaryDirectory() as tmpdir:
# D4RL environments are registered with gym, not gymnasium
# so we need to ensure we're using the gym backend
with set_gym_backend("gym"), tempfile.TemporaryDirectory() as tmpdir:
import gym
os.environ["D4RL_DATASET_DIR"] = tmpdir
env = GymWrapper(gym.make(name))
dataset = make_tensordict(
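
Both d4rl.py hunks and the test_dataset_build change apply the same fix: D4RL registers its tasks with the legacy gym package, so environment construction is wrapped in set_gym_backend("gym") to keep torchrl from resolving to gymnasium. A hedged sketch of the pattern in isolation; the task id is only an example and requires gym and d4rl to be installed.

# Hedged sketch of the set_gym_backend("gym") pattern used above; the task id
# is illustrative and requires gym + d4rl to be installed.
from torchrl.envs.libs.gym import GymWrapper, set_gym_backend


def make_d4rl_env(task: str = "hopper-medium-v2") -> GymWrapper:
    # D4RL registers its environments with the legacy `gym` package, so pin
    # torchrl's gym backend to "gym" (not gymnasium) while building the env.
    with set_gym_backend("gym"):
        import d4rl  # noqa: F401  # importing d4rl registers its task ids
        import gym

        return GymWrapper(gym.make(task))


if __name__ == "__main__":
    env = make_d4rl_env()
    print(env.rollout(2))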