From cb59b1df42ae2e8d137d0290d5db11f92e6d4cec Mon Sep 17 00:00:00 2001
From: Xinjie Yao
Date: Thu, 5 Jun 2025 11:12:40 -0700
Subject: [PATCH] fix_sqa bug

---
 README.md              | 14 ++++++--------
 scripts/config/args.py | 20 ++++++++------------
 2 files changed, 14 insertions(+), 20 deletions(-)

diff --git a/README.md b/README.md
index fa5da5c..fd7201a 100644
--- a/README.md
+++ b/README.md
@@ -141,15 +141,15 @@ The process involved converting demonstration data (Mimic-generated motion traje
 - Using a python interpreter or conda/virtual env that has Isaac Lab, GR00T and Evaluation Tasks installed, convert Mimic-generated trajectories by
 
 ```bash
-# Example: Set `task_index` Based on Task
+# Example: Set `task_name` Based on Task
 # Nut Pouring
-export TASK_INDEX=0
-# Uncomment the below is Task is Exhaust Pipe Sorting
-# export TASK_INDEX=2
+export TASK_NAME="nutpouring"
+# Uncomment the line below when the task is Exhaust Pipe Sorting
+# export TASK_NAME="pipesorting"
 
 # Within IsaacLabEvalTasks directory
 # DATASET_ROOT_DIR is the directory where the Mimic-generated HDF5 is saved locally
-python scripts/convert_hdf5_to_lerobot.py --task_index $TASK_INDEX --data_root $DATASET_ROOT_DIR
+python scripts/convert_hdf5_to_lerobot.py --task_name $TASK_NAME --data_root $DATASET_ROOT_DIR
 ```
 
 The GR00T-LeRobot-compatible datasets will be available in `DATASET_ROOT_DIR`.
@@ -181,9 +181,7 @@ tasks. The ordered sets of joints observed in simulation ([i.e. robot states fro
 GR00T-Lerobot schema also requires [additional metadata](https://github.com/NVIDIA/Isaac-GR00T/blob/main/getting_started/LeRobot_compatible_data_schema.md#meta). We include them ([info.json](scripts/config/gr00t/info.json), [modality.json](scripts/config/gr00t/info.json)) as templates to facilitate conversion. If you are working with other embodiments and data configurations, please modify them accordingly.
 
-The `TASK_INDEX` is associated with the pre-defined task description in [`Gr00tN1DatasetConfig`](scripts/config/args.py) class, where 1 is reserved for data validity check, following GR00T-N1 guidelines. You may want to add other indices for your self-defined task.
-
-If you are interested in leveraging this tool for other tasks, please change the task metadata in `EvalTaskConfig' defined in the [configuration](scripts/config/args.py). More manipulation tasks are coming soon!
+If you are interested in leveraging this tool for other tasks, please change the task metadata in `EvalTaskConfig` defined in the [configuration](scripts/config/args.py). The `TASK_NAME` selects the pre-defined task description used by the [`Gr00tN1DatasetConfig`](scripts/config/args.py) class. The `task_index` is the index associated with that language description; index 1 is reserved for the data validity check, following GR00T-N1 guidelines. You may want to add other indices for your self-defined tasks. More manipulation tasks are coming soon!
 
 ### Post Training
 
diff --git a/scripts/config/args.py b/scripts/config/args.py
index a2ab5d2..0c7e342 100644
--- a/scripts/config/args.py
+++ b/scripts/config/args.py
@@ -29,20 +29,23 @@ class EvalTaskConfig(Enum):
             " the metallic measuring scale."
         ),
         "nut_pouring_task.hdf5",
+        0  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
     )
     PIPESORTING = (
         "Isaac-ExhaustPipe-GR1T2-ClosedLoop-v0",
         "/home/gr00t/GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task",
         "Pick up the blue pipe and place it into the blue bin.",
         "exhaust_pipe_sorting_task.hdf5",
+        2  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
     )
 
-    def __init__(self, task: str, model_path: str, language_instruction: str, hdf5_name: str):
+    def __init__(self, task: str, model_path: str, language_instruction: str, hdf5_name: str, task_index: int):
         self.task = task
         self.model_path = model_path
         self.language_instruction = language_instruction
         self.hdf5_name = hdf5_name
-
+        assert task_index != 1, "task_index must not be 1. (Use 0 for nutpouring, 2 for exhaustpipe, etc.)"
+        self.task_index = task_index
 
 @dataclass
 class Gr00tN1ClosedLoopArguments:
@@ -194,14 +197,7 @@ class Gr00tN1DatasetConfig:
         default="", metadata={"description": "Instruction given to the policy in natural language."}
     )
     hdf5_name: str = field(default="", metadata={"description": "Name of the HDF5 file to use for the dataset."})
-    task_index: int = field(
-        default=0,
-        metadata={
-            "description": (
-                "Index of the task in the task list. Do not use 1. (E.g. 0 for nutpouring, 2 for exhaustpipe)."
-            )
-        },
-    )
+    # Mimic-generated HDF5 datafield
     state_name_sim: str = field(
         default="robot_joint_pos", metadata={"description": "Name of the state in the HDF5 file."}
@@ -293,10 +289,9 @@ class Gr00tN1DatasetConfig:
     hdf5_file_path: Path = field(init=False)
     lerobot_data_dir: Path = field(init=False)
+    task_index: int = field(init=False)  # task index for the task description in LeRobot file
 
     def __post_init__(self):
-        # Reserve task_index 1 for the validity check field
-        assert self.task_index != 1, "task_index must not be 1. (Use 0 for nutpouring, 2 for exhaustpipe, etc.)"
 
         # Populate fields from enum based on task_name
         if self.task_name.upper() not in EvalTaskConfig.__members__:
@@ -304,6 +299,7 @@ def __post_init__(self):
         config = EvalTaskConfig[self.task_name.upper()]
         self.language_instruction = config.language_instruction
         self.hdf5_name = config.hdf5_name
+        self.task_index = config.task_index
         self.hdf5_file_path = self.data_root / self.hdf5_name
         self.lerobot_data_dir = self.data_root / self.hdf5_name.replace(".hdf5", "") / "lerobot"
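
Reviewer note: below is a minimal, self-contained sketch of the lookup pattern this patch converges on, in case the split across hunks is hard to follow. A task name string is upper-cased, resolved against the `EvalTaskConfig` enum, and the per-task metadata (language instruction, HDF5 name, and `task_index`) is copied onto the dataset config in `__post_init__`. The enum member names, the field names, and the PIPESORTING values come from the patch; the NUTPOURING placeholders, the trimmed-down `DatasetConfigSketch` dataclass, and the usage at the bottom are illustrative assumptions, not the repository's actual code.

```python
# Standalone sketch of the task_name -> task metadata resolution introduced by this patch.
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path


class EvalTaskConfig(Enum):
    # Member value layout: (task, model_path, language_instruction, hdf5_name, task_index)
    NUTPOURING = (
        "<nut-pouring Isaac Lab task id>",            # elided in the hunk; placeholder
        "<path to nut-pouring GR00T-N1 checkpoint>",  # placeholder
        "<nut-pouring language instruction>",         # placeholder
        "nut_pouring_task.hdf5",
        0,  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
    )
    PIPESORTING = (
        "Isaac-ExhaustPipe-GR1T2-ClosedLoop-v0",
        "/home/gr00t/GR00T-N1-2B-tuned-Exhaust-Pipe-Sorting-task",
        "Pick up the blue pipe and place it into the blue bin.",
        "exhaust_pipe_sorting_task.hdf5",
        2,  # 1 is reserved for data validity check, following GR00T-N1 guidelines.
    )

    def __init__(self, task, model_path, language_instruction, hdf5_name, task_index):
        # Enum tuple values are unpacked into named attributes, as in the patched args.py.
        self.task = task
        self.model_path = model_path
        self.language_instruction = language_instruction
        self.hdf5_name = hdf5_name
        assert task_index != 1, "task_index must not be 1 (reserved for validity check)."
        self.task_index = task_index


@dataclass
class DatasetConfigSketch:
    """Trimmed-down stand-in for Gr00tN1DatasetConfig; illustration only."""

    task_name: str
    data_root: Path
    language_instruction: str = ""
    hdf5_name: str = ""
    task_index: int = field(init=False)
    hdf5_file_path: Path = field(init=False)

    def __post_init__(self):
        # Resolve the CLI-provided task name against the enum, mirroring the patched __post_init__.
        if self.task_name.upper() not in EvalTaskConfig.__members__:
            raise ValueError(f"Unknown task_name: {self.task_name}")
        config = EvalTaskConfig[self.task_name.upper()]
        self.language_instruction = config.language_instruction
        self.hdf5_name = config.hdf5_name
        self.task_index = config.task_index
        self.hdf5_file_path = self.data_root / self.hdf5_name


if __name__ == "__main__":
    cfg = DatasetConfigSketch(task_name="pipesorting", data_root=Path("/tmp/datasets"))
    print(cfg.task_index, cfg.hdf5_file_path)
    # -> 2 /tmp/datasets/exhaust_pipe_sorting_task.hdf5
```

With the metadata centralized in the enum, the converter CLI only needs `--task_name`, users no longer have to remember index numbers, and the "index 1 is reserved" check fires when the enum is constructed rather than per-run.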