From d7cd500d29f1dd05655726542678234c5e5d5b4c Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Wed, 13 Aug 2025 10:38:38 +0800
Subject: [PATCH 001/145] mini
---
.../vehicle_state/vehicle_parameters.py | 11 +
.../dataset_specific/kitti_360/__init__ .py | 0
.../kitti_360/kitti_360_data_converter.py | 456 ++++++++++++++++++
.../default_dataset_conversion.yaml | 3 +-
.../config/datasets/kitti360_dataset.yaml | 16 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.11.15.45.36/code/hydra/hydra.yaml | 177 +++++++
.../code/hydra/overrides.yaml | 1 +
exp/my_run/2025.08.11.15.45.36/log.txt | 10 +
jbwang_test.py | 68 +++
notebooks/dataset/jbwang_test.py | 86 ++++
notebooks/jbwang_viz_test.py | 252 ++++++++++
notebooks/nuplan/nuplan_sensor_loading.ipynb | 27 +-
requirements.txt | 2 +-
14 files changed, 1165 insertions(+), 4 deletions(-)
create mode 100644 d123/dataset/dataset_specific/kitti_360/__init__ .py
create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
create mode 100644 d123/script/config/datasets/kitti360_dataset.yaml
create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
create mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
create mode 100644 exp/my_run/2025.08.11.15.45.36/log.txt
create mode 100644 jbwang_test.py
create mode 100644 notebooks/dataset/jbwang_test.py
create mode 100644 notebooks/jbwang_viz_test.py
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 8fe4d048..17480042 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -60,6 +60,17 @@ def get_wopd_pacifica_parameters() -> VehicleParameters:
rear_axle_to_center_longitudinal=1.461,
)
+def get_kitti360_station_wagon_parameters() -> VehicleParameters:
+    # TODO: all values except wheel_base still need to be verified
+ return VehicleParameters(
+ vehicle_name="kitti360_station_wagon",
+ width=2.297,
+ length=5.176,
+ height=1.400,
+ wheel_base=2.710,
+ rear_axle_to_center_vertical=0.45,
+ rear_axle_to_center_longitudinal=1.461,
+ )
def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
"""
diff --git a/d123/dataset/dataset_specific/kitti_360/__init__ .py b/d123/dataset/dataset_specific/kitti_360/__init__ .py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
new file mode 100644
index 00000000..b6e97d8c
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -0,0 +1,456 @@
+import gc
+import json
+import os
+from dataclasses import asdict
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+import numpy as np
+import datetime
+import hashlib
+import pyarrow as pa
+from PIL import Image
+from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
+
+from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.time.time_point import TimePoint
+from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
+from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters
+from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.common.geometry.vector import Vector3DIndex
+from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
+from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.dataset.logs.log_metadata import LogMetadata
+
+KITTI360_DT: Final[float] = 0.1
+SORT_BY_TIMESTAMP: Final[bool] = True
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+
+#TODO camera mismatch
+KITTI360_CAMERA_TYPES: Final[Dict[CameraType, str]] = {
+ CameraType.CAM_L0: "image_00",
+ CameraType.CAM_R0: "image_01",
+ # TODO fisheye camera
+ # CameraType.CAM_L1: "image_02",
+ # CameraType.CAM_R1: "image_03",
+}
+
+DIR_2D_RAW = "data_2d_raw"
+DIR_2D_SMT = "data_2d_semantics"
+DIR_3D_RAW = "data_3d_raw"
+DIR_3D_SMT = "data_3d_semantics"
+DIR_3D_BBOX = "data_3d_bboxes"
+DIR_POSES = "data_poses"
+DIR_CALIB = "calibration"
+
+#TODO PATH_2D_RAW_ROOT: sequences currently sit directly under KITTI360_DATA_ROOT, not under data_2d_raw
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
+PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
+PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
+PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
+PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
+PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
+
+KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = {
+ DIR_2D_RAW: PATH_2D_RAW_ROOT,
+ # DIR_2D_SMT: PATH_2D_SMT_ROOT,
+ # DIR_3D_RAW: PATH_3D_RAW_ROOT,
+ # DIR_3D_SMT: PATH_3D_SMT_ROOT,
+ # DIR_3D_BBOX: PATH_3D_BBOX_ROOT,
+ # DIR_POSES: PATH_POSES_ROOT,
+}
+
+
+def create_token(input_data: str) -> str:
+ # TODO: Refactor this function.
+ # TODO: Add a general function to create tokens from arbitrary data.
+ if isinstance(input_data, str):
+ input_data = input_data.encode("utf-8")
+
+ hash_obj = hashlib.sha256(input_data)
+ return hash_obj.hexdigest()[:16]
+
+def _load_calibration() -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
+ """
+    Read the KITTI-360 global calibration files and return:
+    - intrinsics["image_02"] = 3x3 camera matrix
+    - c2e["image_02"] = 4x4 (camera -> ego/body); cam_to_pose is treated here as camera -> vehicle (simplified)
+ """
+ calib_dir = KITTI360_DATA_ROOT / DIR_CALIB
+ intrinsics: Dict[str, np.ndarray] = {}
+ c2e: Dict[str, np.ndarray] = {}
+
+    # Intrinsics: P_rect_0{0..3} entries in perspective.txt
+ persp = calib_dir / "perspective.txt"
+ if persp.exists():
+ with open(persp, "r") as f:
+ lines = [ln.strip() for ln in f if ln.strip()]
+ for ln in lines:
+ if ln.startswith("P_rect_02"):
+ intrinsics["image_02"] = _read_projection_matrix(ln)
+ elif ln.startswith("P_rect_03"):
+ intrinsics["image_03"] = _read_projection_matrix(ln)
+
+    # Extrinsics: Tr_cam0{2,3} in cam_to_pose.txt (camera -> vehicle/pose)
+ c2p = calib_dir / "cam_to_pose.txt"
+ if c2p.exists():
+ with open(c2p, "r") as f:
+ lines = [ln.strip() for ln in f if ln.strip()]
+ for ln in lines:
+ if ln.startswith("Tr_cam02"):
+ vals = [float(x) for x in ln.split(":")[1].strip().split()]
+ T = np.array(vals, dtype=np.float64).reshape(4, 4)
+ c2e["image_02"] = T
+ elif ln.startswith("Tr_cam03"):
+ vals = [float(x) for x in ln.split(":")[1].strip().split()]
+ T = np.array(vals, dtype=np.float64).reshape(4, 4)
+ c2e["image_03"] = T
+
+ return intrinsics, c2e
+
+class Kitti360DataConverter(RawDataConverter):
+ def __init__(
+ self,
+ splits: List[str],
+ log_path: Union[Path, str],
+ data_converter_config: DataConverterConfig,
+ ) -> None:
+ super().__init__(data_converter_config)
+ for split in splits:
+ assert (
+ split in self.get_available_splits()
+ ), f"Split {split} is not available. Available splits: {self.available_splits}"
+
+ self._splits: List[str] = splits
+ self._log_path: Path = Path(log_path)
+ self._log_paths_per_split: Dict[str, List[Path]] = self._collect_log_paths()
+
+ def _collect_log_paths(self) -> Dict[str, List[Path]]:
+ """
+ Collect candidate sequence folders under data_2d_raw that end with '_sync',
+ and keep only those sequences that are present in ALL required modality roots
+ (e.g., data_2d_semantics, data_3d_raw, etc.).
+ """
+ missing_roots = [str(p) for p in KITTI360_REQUIRED_MODALITY_ROOTS.values() if not p.exists()]
+ if missing_roots:
+ raise FileNotFoundError(f"KITTI-360 required roots missing: {missing_roots}")
+
+ # Enumerate candidate sequences from data_2d_raw
+ candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
+
+ valid_seqs: List[Path] = []
+ for seq_dir in candidates:
+ seq_name = seq_dir.name
+ missing_modalities = [
+ modality_name
+ for modality_name, root in KITTI360_REQUIRED_MODALITY_ROOTS.items()
+ if not (root / seq_name).exists()
+ ]
+ if not missing_modalities:
+                valid_seqs.append(seq_dir)  # i.e. KITTI360_DATA_ROOT / DIR_2D_RAW / seq_name
+ #TODO warnings
+ # else:
+ # warnings.warn(
+ # f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
+ # f"Root: {KITTI360_DATA_ROOT}"
+ # )
+ return {"kitti360": valid_seqs}
+
+ def get_available_splits(self) -> List[str]:
+ """Returns a list of available raw data types."""
+ return ["kitti360"]
+
+ def convert_maps(self, worker: WorkerPool) -> None:
+ print("KITTI-360 does not provide standard maps. Skipping map conversion.")
+ return None
+
+ def convert_logs(self, worker: WorkerPool) -> None:
+ log_args = [
+ {
+ "log_path": log_path,
+ "split": split,
+ }
+ for split, log_paths in self._log_paths_per_split.items()
+ for log_path in log_paths
+ ]
+
+ worker_map(
+ worker,
+ partial(
+ convert_kitti360_log_to_arrow,
+ data_converter_config=self.data_converter_config,
+ ),
+ log_args,
+ )
+
+def convert_kitti360_log_to_arrow(
+    args: List[Dict[str, Union[str, Path]]], data_converter_config: DataConverterConfig
+) -> List[Any]:
+
+ for log_info in args:
+ log_path: Path = log_info["log_path"]
+ split: str = log_info["split"]
+ log_name = log_path.stem
+
+ if not log_path.exists():
+ raise FileNotFoundError(f"Log path {log_path} does not exist.")
+ log_file_path = data_converter_config.output_path / split / f"{log_name}.arrow"
+
+ if data_converter_config.force_log_conversion or not log_file_path.exists():
+ log_file_path.unlink(missing_ok=True)
+ if not log_file_path.parent.exists():
+ log_file_path.parent.mkdir(parents=True, exist_ok=True)
+
+ schema_column_list = [
+ ("token", pa.string()),
+ ("timestamp", pa.int64()),
+ ("detections_state", pa.list_(pa.list_(pa.float64(), len(BoundingBoxSE3Index)))),
+ ("detections_velocity", pa.list_(pa.list_(pa.float64(), len(Vector3DIndex)))),
+ ("detections_token", pa.list_(pa.string())),
+ ("detections_type", pa.list_(pa.int16())),
+ ("ego_states", pa.list_(pa.float64(), len(EgoStateSE3Index))),
+ ("traffic_light_ids", pa.list_(pa.int64())),
+ ("traffic_light_types", pa.list_(pa.int16())),
+ ("scenario_tag", pa.list_(pa.string())),
+ ("route_lane_group_ids", pa.list_(pa.int64())),
+ ]
+ if data_converter_config.lidar_store_option is not None:
+ if data_converter_config.lidar_store_option == "path":
+ schema_column_list.append(("lidar", pa.string()))
+ elif data_converter_config.lidar_store_option == "binary":
+ raise NotImplementedError("Binary lidar storage is not implemented.")
+
+ # TODO: Adjust how cameras are added
+ if data_converter_config.camera_store_option is not None:
+ for cam_type in KITTI360_CAMERA_TYPES.keys():
+ if data_converter_config.camera_store_option == "path":
+ schema_column_list.append((cam_type.serialize(), pa.string()))
+ schema_column_list.append((f"{cam_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16)))
+ elif data_converter_config.camera_store_option == "binary":
+ raise NotImplementedError("Binary camera storage is not implemented.")
+
+ recording_schema = pa.schema(schema_column_list)
+ #TODO location
+ metadata = LogMetadata(
+ dataset="kitti360",
+ log_name=log_name,
+ location="None",
+ timestep_seconds=KITTI360_DT,
+ map_has_z=False,
+ )
+
+ #TODO vehicle parameters
+ vehicle_parameters = get_kitti360_station_wagon_parameters()
+ camera_metadata = get_kitti360_camera_metadata()
+ recording_schema = recording_schema.with_metadata(
+ {
+ "log_metadata": json.dumps(asdict(metadata)),
+ "vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
+ "camera_metadata": camera_metadata_dict_to_json(camera_metadata),
+ }
+ )
+
+ _write_recording_table(log_name, recording_schema, log_file_path, data_converter_config)
+
+ gc.collect()
+ return []
+
+
+def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]:
+
+ persp = PATH_CALIB_ROOT / "perspective.txt"
+
+ assert persp.exists()
+ result = {"image_00": {}, "image_01": {}}
+
+ with open(persp, "r") as f:
+ lines = [ln.strip() for ln in f if ln.strip()]
+ for ln in lines:
+ key, value = ln.split(" ", 1)
+ cam_id = key.split("_")[-1][:2]
+ if key.startswith("P_rect_"):
+ result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln)
+ elif key.startswith("S_rect_"):
+ result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()]
+ elif key.startswith("D_"):
+ result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()]
+
+ log_cam_infos: Dict[str, CameraMetadata] = {}
+ for cam_type, cam_name in KITTI360_CAMERA_TYPES.items():
+ log_cam_infos[cam_type.serialize()] = CameraMetadata(
+ camera_type=cam_type,
+ width=result[cam_name]["wh"][0],
+ height=result[cam_name]["wh"][1],
+ intrinsic=np.array(result[cam_name]["intrinsic"]),
+ distortion=np.array(result[cam_name]["distortion"]),
+ )
+ return log_cam_infos
+
+def _read_projection_matrix(p_line: str) -> np.ndarray:
+ parts = p_line.split(" ", 1)
+ if len(parts) != 2:
+ raise ValueError(f"Bad projection line: {p_line}")
+ vals = [float(x) for x in parts[1].strip().split()]
+ P = np.array(vals, dtype=np.float64).reshape(3, 4)
+ K = P[:, :3]
+ return K
+
+def _write_recording_table(
+ log_name: str,
+ recording_schema: pa.Schema,
+ log_file_path: Path,
+ data_converter_config: DataConverterConfig
+) -> None:
+
+ ts_list = _read_timestamps(log_name)
+
+ with pa.OSFile(str(log_file_path), "wb") as sink:
+ with pa.ipc.new_file(sink, recording_schema) as writer:
+ for i, tp in enumerate(ts_list):
+ row_data = {
+ "token": [create_token(f"{log_name}_{i}")],
+ "timestamp": [tp.time_us],
+                    "detections_state": [[]],
+                    "detections_velocity": [[]],
+                    "detections_token": [[]],
+                    "detections_type": [[]],
+                    "ego_states": [[0.0] * len(EgoStateSE3Index)],  # placeholder until real ego poses are extracted
+                    "traffic_light_ids": [[]],
+                    "traffic_light_types": [[]],
+                    "scenario_tag": [[]],
+                    "route_lane_group_ids": [[]],
+ }
+
+ if data_converter_config.lidar_store_option is not None:
+                    row_data["lidar"] = [None]  # one null entry per row until extraction is wired up
+                    # row_data["lidar"] = [_extract_lidar(log_name, data_converter_config)]
+
+ if data_converter_config.camera_store_option is not None:
+ # camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config)
+                    camera_data_dict = {camera_type: None for camera_type in KITTI360_CAMERA_TYPES}  # TODO: real extraction
+ for camera_type, camera_data in camera_data_dict.items():
+ if camera_data is not None:
+ row_data[camera_type.serialize()] = [camera_data[0]]
+ row_data[f"{camera_type.serialize()}_extrinsic"] = [camera_data[1]]
+ else:
+ row_data[camera_type.serialize()] = [None]
+ row_data[f"{camera_type.serialize()}_extrinsic"] = [None]
+
+ batch = pa.record_batch(row_data, schema=recording_schema)
+ writer.write_batch(batch)
+
+ if SORT_BY_TIMESTAMP:
+ recording_table = open_arrow_table(log_file_path)
+ recording_table = recording_table.sort_by([("timestamp", "ascending")])
+ write_arrow_table(recording_table, log_file_path)
+
+#TODO default timestamps
+# If timestamps are not provided, we can generate them based on the KITTI-360 DT
+def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
+
+ ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
+ if ts_file.exists():
+ tps: List[TimePoint] = []
+ with open(ts_file, "r") as f:
+ for line in f:
+ s = line.strip()
+ if not s:
+ continue
+ dt_str, ns_str = s.split('.')
+ dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
+ dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
+ unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+ total_seconds = (dt_obj - unix_epoch).total_seconds()
+
+ ns_value = int(ns_str)
+ us_from_ns = ns_value // 1000
+
+ total_us = int(total_seconds * 1_000_000) + us_from_ns
+
+ tps.append(TimePoint.from_us(total_us))
+ return tps
+ return None
+
+#TODO lidar extraction
+def _extract_lidar(log_name: str, data_converter_config: DataConverterConfig) -> Optional[str]:
+    lidar: Optional[str] = None
+    # TODO: placeholder file; iterate over the actual per-frame scans instead.
+    lidar_full_path = PATH_3D_SMT_ROOT / "train" / log_name / "0000000002_0000000385.ply"
+    if lidar_full_path.exists():
+        if data_converter_config.lidar_store_option == "path":
+            lidar = f"{log_name}/lidar/{lidar_full_path.stem}.npy"
+        elif data_converter_config.lidar_store_option == "binary":
+            raise NotImplementedError("Binary lidar storage is not implemented.")
+    else:
+        raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
+    return lidar
+
+def _extract_camera():
+ pass
+
+
+
+# for idx in range(n_frames):
+# token = f"{seq_name}_{idx:06d}"
+# t_us = ts_list[idx].time_us
+
+# row = {
+# "token": [token],
+# "timestamp": [t_us],
+#             # Empty placeholders for now; swap in the real annotations later
+# "detections_state": [[]],
+# "detections_velocity": [[]],
+# "detections_token": [[]],
+# "detections_type": [[]],
+#             "ego_states": [([0.0] * len(EgoStateSE3Index))], # placeholder
+# "traffic_light_ids": [[]],
+# "traffic_light_types": [[]],
+# "scenario_tag": [["unknown"]],
+# "route_lane_group_ids": [[]],
+# }
+
+#         # lidar path (if present)
+# if data_converter_config.lidar_store_option is not None:
+#             # velodyne bin: KITTI-360/data_3d_raw/<seq>/velodyne_points/data/0000000000.bin
+# velodyne_dir = (
+# KITTI360_DATA_ROOT / DIR_3D / seq_name / "velodyne_points" / "data"
+# )
+#             # File names may be zero-padded to 10 digits; try a few widths
+# bin_path = None
+# for fmt in [f"{idx:010d}.bin", f"{idx:06d}.bin", f"{idx:08d}.bin"]:
+# cand = velodyne_dir / fmt
+# if cand.exists():
+# bin_path = cand
+# break
+# row["lidar"] = [str(bin_path.relative_to(KITTI360_DATA_ROOT)) if bin_path else None]
+
+#         # Camera paths and extrinsics
+# if data_converter_config.camera_store_option is not None:
+# for cam_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
+# img_dir = seq_dir_2d / cam_dir_name / "data"
+#             # try several zero-padding widths
+# img_path = None
+# for ext in (".png", ".jpg", ".jpeg"):
+# for fmt in [f"{idx:010d}{ext}", f"{idx:06d}{ext}", f"{idx:08d}{ext}"]:
+# cand = img_dir / fmt
+# if cand.exists():
+# img_path = cand
+# break
+# if img_path:
+# break
+# if img_path is not None:
+# rel = str(img_path.relative_to(KITTI360_DATA_ROOT))
+# row[cam_type.serialize()] = [rel]
+#                     # Extrinsics: fixed cam->ego from the global calibration, constant per frame (swap in rolling-shutter / per-frame pose handling here if needed)
+# T = c2e.get(KITTI360_CAMERA_TYPES[cam_type], np.eye(4, dtype=np.float64))
+# row[f"{cam_type.serialize()}_extrinsic"] = [T.astype(np.float64).reshape(-1).tolist()]
+# else:
+# row[cam_type.serialize()] = [None]
+# row[f"{cam_type.serialize()}_extrinsic"] = [None]
+
+# batch = pa.record_batch(row, schema=recording_schema)
+# writer.write_batch(batch)
+# del batch, row
\ No newline at end of file
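
As a sanity check on the _read_timestamps logic above, the same parsing in self-contained form; the sample line mimics a KITTI-360 timestamps.txt entry and the value is made up:

    import datetime

    line = "2013-05-28 08:46:02.913164624"  # illustrative timestamps.txt entry
    dt_str, ns_str = line.split(".")
    dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S").replace(tzinfo=datetime.timezone.utc)
    total_us = int(dt_obj.timestamp() * 1_000_000) + int(ns_str) // 1000
    print(total_us)  # microseconds since the Unix epoch, as stored in the 'timestamp' column
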
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index cceb2911..bc48ed00 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,9 +15,10 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
+ - kitti360_dataset
# - nuplan_private_dataset
# - carla_dataset
- - wopd_dataset
+ # - wopd_dataset
force_log_conversion: False
force_map_conversion: True
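
With kitti360_dataset selected here, the run recorded in the hydra dump further below corresponds to invoking `python run_dataset_conversion.py experiment_name=my_run` from d123/script (per hydra.runtime.cwd and the recorded override); this is the presumed entry point, matching the job name in hydra.yaml.
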
diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml
new file mode 100644
index 00000000..418d36a4
--- /dev/null
+++ b/d123/script/config/datasets/kitti360_dataset.yaml
@@ -0,0 +1,16 @@
+kitti360_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: 'all'
+
+ splits: ["kitti360"]
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: 'all'
+
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: "path"
+ lidar_store_option: "path"
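
For reference, a minimal sketch of how Hydra would materialize this entry; it assumes the ${...} interpolations (environment variables and the force_* flags) are resolvable, which in the real run is handled by the composed default_dataset_conversion config:

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.load("d123/script/config/datasets/kitti360_dataset.yaml")
    # The ${oc.env:...} and ${force_*} interpolations must resolve here; in the
    # real pipeline they are supplied by default_dataset_conversion.yaml.
    entry = next(iter(cfg.values()))  # the single dataset entry defined in this file
    converter = instantiate(entry)    # -> Kitti360DataConverter
    # converter.convert_logs(worker) would then run the conversion, given a WorkerPool.
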
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
new file mode 100644
index 00000000..86d05e7b
--- /dev/null
+++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: my_run
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: false
+force_map_conversion: true
+datasets:
+ nuplan_private_dataset:
+ _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _convert_: all
+ splits:
+ - nuplan_private_test
+ log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
new file mode 100644
index 00000000..bf09b447
--- /dev/null
+++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=my_run
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=my_run
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123/d123/script
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/my_run/2025.08.11.15.45.36
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
new file mode 100644
index 00000000..373bde0c
--- /dev/null
+++ b/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=my_run
diff --git a/exp/my_run/2025.08.11.15.45.36/log.txt b/exp/my_run/2025.08.11.15.45.36/log.txt
new file mode 100644
index 00000000..2bdc0b60
--- /dev/null
+++ b/exp/my_run/2025.08.11.15.45.36/log.txt
@@ -0,0 +1,10 @@
+2025-08-11 15:45:36,813 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-11 15:46:10,300 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-11 15:46:34,960 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-11 15:46:34,962 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-11 15:46:34,962 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-11 15:46:34,963 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-11 15:46:34,964 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/jbwang_test.py b/jbwang_test.py
new file mode 100644
index 00000000..ac3afac5
--- /dev/null
+++ b/jbwang_test.py
@@ -0,0 +1,68 @@
+# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+
+# # # Open the database file
+# # db = NuPlanDB(db_path="/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db")
+# NUPLAN_DATA_ROOT = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini"
+# log_path
+# log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
+
+# # Fetch frame 1050
+# frame = db.get_frame(1050)
+# img_front = frame.camera_front # front-view image
+# point_cloud = frame.lidar # point cloud
+
+# # Fetch all vehicle status rows for this log segment
+# status_data = db.get_vehicle_status() # returns a DataFrame
+# print(status_data)
+
+
+
+# from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter, DataConverterConfig
+# splits = ["nuplan_mini_train"]
+# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/"
+# converter = NuplanDataConverter(
+# log_path=log_path,
+#     splits=splits,
+# data_converter_config=DataConverterConfig(output_path="data/jbwang/d123"),
+# )
+# # converter.convert_logs()
+from pathlib import Path
+log_paths_per_split = {
+    "nuplan_mini_train": ["2021", "2022"],
+}
+log_args = [
+ {
+ "log_path": log_path,
+ "split": split,
+ }
+ for split, log_paths in log_paths_per_split.items()
+ for log_path in log_paths
+ ]
+PATH_2D_RAW_ROOT = Path("/nas/datasets/KITTI-360/data_3d_raw/")
+candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
+# print(log_args)
+# print(candidates)
+# print(candidates[0].name)
+# print(candidates[0].stem)
+# print(type(candidates[0].name))
+# print(type(candidates[0].stem))
+# PATH_2D_RAW_ROOT_new = PATH_2D_RAW_ROOT/"123"/candidates[0].name
+# print(PATH_2D_RAW_ROOT_new)
+
+
+
+# import hashlib
+# def create_token(input_data: str) -> str:
+# # TODO: Refactor this function.
+# # TODO: Add a general function to create tokens from arbitrary data.
+# if isinstance(input_data, str):
+# input_data = input_data.encode("utf-8")
+
+# hash_obj = hashlib.sha256(input_data)
+# return hash_obj.hexdigest()[:16]
+
+# log_name = "1230_asd_"
+# for i in range(20):
+# a = create_token(f"{log_name}_{i}")
+# print(a)
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
new file mode 100644
index 00000000..caaa3201
--- /dev/null
+++ b/notebooks/dataset/jbwang_test.py
@@ -0,0 +1,86 @@
+s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
+# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
+
+import pyarrow as pa
+import pyarrow.fs as fs
+import pyarrow.dataset as ds
+
+import os
+
+s3_fs = fs.S3FileSystem()
+from d123.common.utils.timer import Timer
+
+
+timer = Timer()
+timer.start()
+
+dataset = ds.dataset(f"{s3_uri}", format="ipc")
+timer.log("1. Dataset loaded")
+
+# Get all column names and remove the ones you want to drop
+all_columns = dataset.schema.names
+# print("all_columns", all_columns)
+# print("Schema:")
+# print(dataset.schema)
+# columns_to_keep = [col for col in all_columns if col not in ["front_cam_demo", "front_cam_transform"]]
+timer.log("2. Columns filtered")
+
+table = dataset.to_table(columns=all_columns)
+# print("table",table)
+# print(table["token"])
+for col in table.column_names:
+ if col == "lidar":
+ continue
+ print(f"Column: {col}, Type: {table.schema.field(col).type}")
+    tokens = table[col] # or table.column("token")
+ # print(len(tokens))
+ print(tokens.slice(0, 4).to_pylist())
+# print(table["traffic_light_ids"])
+timer.log("3. Table created")
+# Save locally
+# with pa.ipc.new_file("filtered_file.arrow", table.schema) as writer:
+# writer.write_table(table)
+timer.log("4. Table saved locally")
+
+timer.end()
+timer.stats(verbose=False)
+
+# Inspect the nuplan database table structure and contents
+
+# from pathlib import Path
+# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+# from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
+# from sqlalchemy import inspect, select
+# from sqlalchemy.orm import Session
+# from sqlalchemy import func
+# from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
+
+# NUPLAN_DATA_ROOT = Path("/nas/datasets/nuplan/") # adjust to your actual path
+# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db"
+
+# db = NuPlanDB(NUPLAN_DATA_ROOT, log_path, None)
+# # print(db.log)
+# print(db.log.map_version)
+# # print("log.cameras",db.log.cameras)
+# # print("Log name:", db.log_name)
+# # print("lidar",db.lidar_pc)
+# # print("scenario_tags", db.scenario_tag)
+# # print(db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp)).first())
+
+# # persp = Path("/nas/datasets/KITTI-360/calibration/perspective.txt")
+# # with open(persp, "r") as f:
+# # lines = [ln.strip() for ln in f if ln.strip()]
+# # print(lines)
+
+# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_kitti360_camera_metadata
+
+# print(get_kitti360_camera_metadata())
+
+
+
+# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import _read_timestamps
+# result = _read_timestamps("2013_05_28_drive_0000_sync")
+# print(len(result))
+# print([result[0].time_us])
\ No newline at end of file
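
Complementing the column dump above, the log-level metadata that the converter embeds in the arrow schema can be read back directly; a sketch against the same local file, assuming it was written by a converter that stores the log_metadata key (as the KITTI-360 converter in this patch does):

    import json
    import pyarrow as pa

    with pa.OSFile(s3_uri, "rb") as source:
        reader = pa.ipc.open_file(source)
        meta = reader.schema.metadata or {}
        # Schema metadata keys/values are bytes; the converter stores JSON blobs.
        log_metadata = json.loads(meta[b"log_metadata"])
        print(log_metadata["log_name"], log_metadata["timestep_seconds"])
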
diff --git a/notebooks/jbwang_viz_test.py b/notebooks/jbwang_viz_test.py
new file mode 100644
index 00000000..73f05dbf
--- /dev/null
+++ b/notebooks/jbwang_viz_test.py
@@ -0,0 +1,252 @@
+# from typing import Tuple
+
+# import matplotlib.pyplot as plt
+
+# from nuplan.planning.utils.multithreading.worker_sequential import Sequential
+
+# from d123.dataset.scene.scene_builder import ArrowSceneBuilder
+# from d123.dataset.scene.scene_filter import SceneFilter
+# from d123.dataset.scene.abstract_scene import AbstractScene
+
+# from typing import Dict
+# from d123.common.datatypes.sensor.camera import CameraType
+# from d123.common.visualization.matplotlib.camera import add_camera_ax
+# from d123.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax
+
+# # split = "nuplan_private_test"
+# # log_names = ["2021.09.29.17.35.58_veh-44_00066_00432"]
+
+
+
+
+# # splits = ["carla"]
+# splits = ["nuplan_private_test"]
+# # splits = ["wopd_train"]
+# # log_names = None
+
+
+
+# # splits = ["nuplan_private_test"]
+# log_names = None
+
+# scene_tokens = None
+
+# scene_filter = SceneFilter(
+# split_names=splits,
+# log_names=log_names,
+# scene_tokens=scene_tokens,
+# duration_s=19,
+# history_s=0.0,
+# timestamp_threshold_s=20,
+# shuffle=False,
+# camera_types=[CameraType.CAM_F0],
+# )
+# scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data/")
+# worker = Sequential()
+# # worker = RayDistributed()
+# scenes = scene_builder.get_scenes(scene_filter, worker)
+
+# print(f"Found {len(scenes)} scenes")
+
+
+# from typing import List, Optional, Tuple
+# import matplotlib.pyplot as plt
+# import numpy as np
+# from d123.common.geometry.base import Point2D
+# from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10
+# from d123.common.visualization.color.config import PlotConfig
+# from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG
+# from d123.common.visualization.matplotlib.observation import (
+# add_box_detections_to_ax,
+# add_default_map_on_ax,
+# add_ego_vehicle_to_ax,
+# add_traffic_lights_to_ax,
+# )
+# from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax
+# from d123.dataset.maps.abstract_map import AbstractMap
+# from d123.dataset.maps.abstract_map_objects import AbstractLane
+# from d123.dataset.maps.map_datatypes import MapLayer
+# from d123.dataset.scene.abstract_scene import AbstractScene
+
+
+# import shapely.geometry as geom
+
+# LEFT_CONFIG: PlotConfig = PlotConfig(
+# fill_color=TAB_10[2],
+# fill_color_alpha=1.0,
+# line_color=TAB_10[2],
+# line_color_alpha=0.5,
+# line_width=1.0,
+# line_style="-",
+# zorder=3,
+# )
+
+# RIGHT_CONFIG: PlotConfig = PlotConfig(
+# fill_color=TAB_10[3],
+# fill_color_alpha=1.0,
+# line_color=TAB_10[3],
+# line_color_alpha=0.5,
+# line_width=1.0,
+# line_style="-",
+# zorder=3,
+# )
+
+
+# LANE_CONFIG: PlotConfig = PlotConfig(
+# fill_color=BLACK,
+# fill_color_alpha=1.0,
+# line_color=BLACK,
+# line_color_alpha=0.0,
+# line_width=0.0,
+# line_style="-",
+# zorder=5,
+# )
+
+# ROAD_EDGE_CONFIG: PlotConfig = PlotConfig(
+# fill_color=DARKER_GREY.set_brightness(0.0),
+# fill_color_alpha=1.0,
+# line_color=DARKER_GREY.set_brightness(0.0),
+# line_color_alpha=1.0,
+# line_width=1.0,
+# line_style="-",
+# zorder=3,
+# )
+
+# ROAD_LINE_CONFIG: PlotConfig = PlotConfig(
+# fill_color=DARKER_GREY,
+# fill_color_alpha=1.0,
+# line_color=NEW_TAB_10[5],
+# line_color_alpha=1.0,
+# line_width=1.5,
+# line_style="-",
+# zorder=3,
+# )
+
+
+# def add_debug_map_on_ax(
+# ax: plt.Axes,
+# map_api: AbstractMap,
+# point_2d: Point2D,
+# radius: float,
+# route_lane_group_ids: Optional[List[int]] = None,
+# ) -> None:
+# layers: List[MapLayer] = [
+# MapLayer.LANE,
+# MapLayer.LANE_GROUP,
+# MapLayer.GENERIC_DRIVABLE,
+# MapLayer.CARPARK,
+# MapLayer.CROSSWALK,
+# MapLayer.INTERSECTION,
+# MapLayer.WALKWAY,
+# MapLayer.ROAD_EDGE,
+# MapLayer.ROAD_LINE,
+# ]
+# x_min, x_max = point_2d.x - radius, point_2d.x + radius
+# y_min, y_max = point_2d.y - radius, point_2d.y + radius
+# patch = geom.box(x_min, y_min, x_max, y_max)
+# map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate="intersects")
+
+# done = False
+# for layer, map_objects in map_objects_dict.items():
+# for map_object in map_objects:
+# try:
+# if layer in [
+# # MapLayer.GENERIC_DRIVABLE,
+# # MapLayer.CARPARK,
+# # MapLayer.CROSSWALK,
+# # MapLayer.INTERSECTION,
+# # MapLayer.WALKWAY,
+# ]:
+# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
+
+# # if layer in [MapLayer.LANE_GROUP]:
+# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
+
+# if layer in [MapLayer.LANE]:
+# map_object: AbstractLane
+# if map_object.right_lane is not None and map_object.left_lane is not None and not done:
+# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG)
+# add_shapely_polygon_to_ax(ax, map_object.right_lane.shapely_polygon, RIGHT_CONFIG)
+# add_shapely_polygon_to_ax(ax, map_object.left_lane.shapely_polygon, LEFT_CONFIG)
+# done = True
+# else:
+# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
+
+
+# # add_shapely_linestring_to_ax(ax, map_object.right_boundary.linestring, RIGHT_CONFIG)
+# # add_shapely_linestring_to_ax(ax, map_object.left_boundary.linestring, LEFT_CONFIG)
+# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG)
+
+# # centroid = map_object.shapely_polygon.centroid
+# # ax.text(
+# # centroid.x,
+# # centroid.y,
+# # str(map_object.id),
+# # horizontalalignment="center",
+# # verticalalignment="center",
+# # fontsize=8,
+# # bbox=dict(facecolor="white", alpha=0.7, boxstyle="round,pad=0.2"),
+# # )
+# # if layer in [MapLayer.ROAD_EDGE]:
+# # add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG)
+# # edge_lengths.append(map_object.polyline_3d.linestring.length)
+
+# if layer in [MapLayer.ROAD_LINE]:
+# line_type = int(map_object.road_line_type)
+# plt_config = PlotConfig(
+# fill_color=NEW_TAB_10[line_type % len(NEW_TAB_10)],
+# fill_color_alpha=1.0,
+# line_color=NEW_TAB_10[line_type % len(NEW_TAB_10)],
+# line_color_alpha=1.0,
+# line_width=1.5,
+# line_style="-",
+# zorder=3,
+# )
+# add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, plt_config)
+
+# except Exception:
+# import traceback
+
+# print(f"Error adding map object of type {layer.name} and id {map_object.id}")
+# traceback.print_exc()
+
+# ax.set_title(f"Map: {map_api.map_name}")
+
+
+# def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:
+
+# ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+# box_detections = scene.get_box_detections_at_iteration(iteration)
+
+# point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d
+# add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
+# # add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
+# # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)
+
+# add_box_detections_to_ax(ax, box_detections)
+# add_ego_vehicle_to_ax(ax, ego_vehicle_state)
+
+# zoom = 1.0
+# ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)
+# ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)
+
+# ax.set_aspect("equal", adjustable="box")
+# return ax
+
+
+# def plot_scene_at_iteration(
+# scene: AbstractScene, iteration: int = 0, radius: float = 80
+# ) -> Tuple[plt.Figure, plt.Axes]:
+
+# size = 15
+
+# fig, ax = plt.subplots(figsize=(size, size))
+# _plot_scene_on_ax(ax, scene, iteration, radius)
+# return fig, ax
+
+
+# scene_index = 1
+# fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=100, radius=100)
+
+# # fig.savefig(f"/home/daniel/scene_{scene_index}_iteration_1.pdf", dpi=300, bbox_inches="tight")
+
diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb
index 0dd69b4e..8291f265 100644
--- a/notebooks/nuplan/nuplan_sensor_loading.ipynb
+++ b/notebooks/nuplan/nuplan_sensor_loading.ipynb
@@ -21,7 +21,18 @@
"execution_count": null,
"id": "1",
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
"source": [
"import numpy as np\n",
"\n",
@@ -33,7 +44,19 @@
"execution_count": null,
"id": "2",
"metadata": {},
- "outputs": [],
+ "outputs": [
+ {
+ "ename": "ModuleNotFoundError",
+ "evalue": "No module named 'd123'",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+ "\u001b[31mModuleNotFoundError\u001b[39m Traceback (most recent call last)",
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01md123\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mdataset\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mdataset_specific\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mnuplan\u001b[39;00m\u001b[34;01m.\u001b[39;00m\u001b[34;01mnuplan_data_converter\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m NuplanDataConverter\n",
+ "\u001b[31mModuleNotFoundError\u001b[39m: No module named 'd123'"
+ ]
+ }
+ ],
"source": [
"from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter"
]
diff --git a/requirements.txt b/requirements.txt
index f0c697e2..b022f008 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2
+# nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2
# nuplan requirements
aioboto3
From a4f664ea5fe560c5195228ab390fd2bde1ccf457 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Fri, 15 Aug 2025 14:59:52 +0800
Subject: [PATCH 002/145] finish kitti360v0.0.1
---
d123/common/datatypes/sensor/lidar_index.py | 7 +
.../kitti_360/{__init__ .py => __init__.py} | 0
.../dataset_specific/kitti_360/jbwang_test.py | 154 +++++++
.../kitti_360/kitti_360_data_converter.py | 391 +++++++++++-------
.../kitti_360/kitti_360_helper.py | 102 +++++
.../dataset_specific/kitti_360/labels.py | 168 ++++++++
.../default_dataset_conversion.yaml | 4 -
.../config/datasets/kitti360_dataset.yaml | 2 +-
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.31.57/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti360_test/2025.08.15.14.31.57/log.txt | 10 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.36.40/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti360_test/2025.08.15.14.36.40/log.txt | 10 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.40.29/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti_test2/2025.08.15.14.40.29/log.txt | 10 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.43.13/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti_test2/2025.08.15.14.43.13/log.txt | 12 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.46.49/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti_test2/2025.08.15.14.46.49/log.txt | 10 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.50.55/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti_test2/2025.08.15.14.50.55/log.txt | 11 +
.../code/hydra/config.yaml | 60 +++
.../2025.08.15.14.52.39/code/hydra/hydra.yaml | 177 ++++++++
.../code/hydra/overrides.yaml | 1 +
exp/kitti_test2/2025.08.15.14.52.39/log.txt | 11 +
jbwang_test.py | 19 +-
jbwang_test2.py | 70 ++++
notebooks/dataset/jbwang_test.py | 11 +-
39 files changed, 2508 insertions(+), 160 deletions(-)
rename d123/dataset/dataset_specific/kitti_360/{__init__ .py => __init__.py} (100%)
create mode 100644 d123/dataset/dataset_specific/kitti_360/jbwang_test.py
create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
create mode 100644 d123/dataset/dataset_specific/kitti_360/labels.py
create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.31.57/log.txt
create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
create mode 100644 exp/kitti360_test/2025.08.15.14.36.40/log.txt
create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.40.29/log.txt
create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.43.13/log.txt
create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.46.49/log.txt
create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.50.55/log.txt
create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
create mode 100644 exp/kitti_test2/2025.08.15.14.52.39/log.txt
create mode 100644 jbwang_test2.py
diff --git a/d123/common/datatypes/sensor/lidar_index.py b/d123/common/datatypes/sensor/lidar_index.py
index 0df92cff..4e7ad133 100644
--- a/d123/common/datatypes/sensor/lidar_index.py
+++ b/d123/common/datatypes/sensor/lidar_index.py
@@ -60,3 +60,10 @@ class WopdLidarIndex(LiDARIndex):
X = 3
Y = 4
Z = 5
+
+@register_lidar_index
+class Kitti360LidarIndex(LiDARIndex):
+ X = 0
+ Y = 1
+ Z = 2
+ INTENSITY = 3
\ No newline at end of file
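
A short sketch of the intended use of this index against a raw KITTI-360 Velodyne scan; the flat float32 x/y/z/intensity layout is the usual KITTI convention, and the file path is illustrative:

    import numpy as np

    from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex

    # Real scans live under data_3d_raw/<seq>/velodyne_points/data/; this assumes
    # the index members behave as plain integers, as their use elsewhere suggests.
    points = np.fromfile("0000000000.bin", dtype=np.float32).reshape(-1, 4)
    xyz = points[:, [Kitti360LidarIndex.X, Kitti360LidarIndex.Y, Kitti360LidarIndex.Z]]
    intensity = points[:, Kitti360LidarIndex.INTENSITY]
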
diff --git a/d123/dataset/dataset_specific/kitti_360/__init__ .py b/d123/dataset/dataset_specific/kitti_360/__init__.py
similarity index 100%
rename from d123/dataset/dataset_specific/kitti_360/__init__ .py
rename to d123/dataset/dataset_specific/kitti_360/__init__.py
diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
new file mode 100644
index 00000000..6f0bdbd9
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
@@ -0,0 +1,154 @@
+import gc
+import json
+import os
+import pickle
+from dataclasses import asdict
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+import numpy as np
+import pyarrow as pa
+import yaml
+from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens
+from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
+from nuplan.database.nuplan_db_orm.lidar_box import LidarBox
+from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
+from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+from nuplan.planning.simulation.observation.observation_type import CameraChannel
+from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
+from pyquaternion import Quaternion
+from sqlalchemy import func
+
+
+from kitti_360_data_converter import _extract_ego_state_all, get_kitti360_lidar_metadata, _extract_cameras, _extract_detections
+
+# a = _extract_ego_state_all("2013_05_28_drive_0000_sync")
+# print(a[0])
+# print(a[1])
+# print(a[10])
+from d123.common.datatypes.time.time_point import TimePoint
+from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+
+NUPLAN_CAMERA_TYPES = {
+ CameraType.CAM_F0: CameraChannel.CAM_F0,
+ CameraType.CAM_B0: CameraChannel.CAM_B0,
+ CameraType.CAM_L0: CameraChannel.CAM_L0,
+ CameraType.CAM_L1: CameraChannel.CAM_L1,
+ CameraType.CAM_L2: CameraChannel.CAM_L2,
+ CameraType.CAM_R0: CameraChannel.CAM_R0,
+ CameraType.CAM_R1: CameraChannel.CAM_R1,
+ CameraType.CAM_R2: CameraChannel.CAM_R2,
+}
+
+NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"])
+NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60)
+
+def _extract_camera(
+ log_db: NuPlanDB,
+ lidar_pc: LidarPc,
+ source_log_path: Path,
+) -> Dict[CameraType, Union[str, bytes]]:
+
+ camera_dict: Dict[str, Union[str, bytes]] = {}
+ sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
+
+ log_cam_infos = {camera.token: camera for camera in log_db.log.cameras}
+ for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items():
+ camera_data: Optional[Union[str, bytes]] = None
+ c2e: Optional[List[float]] = None
+ image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)]))
+ # print("image_class",image_class)
+ if len(image_class) != 0:
+ image = image_class[0]
+ filename_jpg = sensor_root / image.filename_jpg
+
+ timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us
+ img_ego_pose: EgoPose = (
+ log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
+ )
+ img_e2g = img_ego_pose.trans_matrix
+ g2e = lidar_pc.ego_pose.trans_matrix_inv
+ img_e2e = g2e @ img_e2g
+ cam_info = log_cam_infos[image.camera_token]
+ c2img_e = cam_info.trans_matrix
+ c2e = img_e2e @ c2img_e
+ # print(f"Camera {camera_type} found for lidar {lidar_pc.token} at timestamp {timestamp}")
+ print(camera_type,"c2e:", c2e)
+ camera_dict[camera_type] = camera_data
+
+ return camera_dict
+
+
+def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60):
+
+ retrieved_images = get_images_from_lidar_tokens(
+ log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel]
+ )
+
+ # if interp_trans:
+ # neighbours = []
+ # ego_poses_dict = {}
+ # for ego_pose in log.ego_poses:
+ # ego_poses_dict[ego_pose.token] = ego_pose
+ # if abs(ego_pose.timestamp - lidar_pc.ego_pose.timestamp) / 1e6 < 0.5:
+ # neighbours.append(ego_pose)
+ # timestamps = [pose.timestamp for pose in neighbours]
+ # translations = [pose.translation_np for pose in neighbours]
+ # splines = [CubicSpline(timestamps, [translation[i] for translation in translations]) for i in range(2)]
+
+ log_cam_infos = {camera.token : camera for camera in log.camera}
+ cams = {}
+ for img in retrieved_images:
+ channel = img.channel
+ filename = img.filename_jpg
+
+ # if interp_trans:
+ # img_ego_pose = ego_poses_dict[img.ego_pose_token]
+ # interpolated_translation = np.array([splines[0](timestamp), splines[1](timestamp), img_ego_pose.z])
+ # delta = interpolated_translation - lidar_pc.ego_pose.translation_np
+ # delta = np.dot(lidar_pc.ego_pose.quaternion.rotation_matrix.T, delta)
+ if channel == "CAM_F0":
+ timestamp = img.timestamp + (rolling_shutter_s * 1e6)
+ img_ego_pose = log.session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
+ img_e2g = img_ego_pose.trans_matrix
+ # print("img_e2g:", img_e2g)
+
+ g2e = lidar_pc.ego_pose.trans_matrix_inv
+ # print("g2e:", g2e) #change obviously
+ img_e2e = g2e @ img_e2g
+ # print("img_e2e:", img_e2e)
+ cam_info = log_cam_infos[img.camera_token]
+ c2img_e = cam_info.trans_matrix
+ # print("c2img_e:", c2img_e)
+ c2e = img_e2e @ c2img_e
+ # print("channel:", channel, "c2e:", c2e)
+
+ cams[channel] = dict(
+ data_path = filename,
+ timestamp = img.timestamp,
+ token=img.token,
+ sensor2ego_rotation = Quaternion(matrix=c2e[:3, :3]),
+ sensor2ego_translation = c2e[:3, 3],
+ cam_intrinsic = cam_info.intrinsic_np,
+ distortion = cam_info.distortion_np,
+ )
+
+
+ if len(cams) != 8:
+ return None
+ # print(cams)
+ return cams
+
+if __name__ == "__main__":
+ # Example usage
+ # data_converter_config: DataConverterConfig
+ # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.10.11.07.12.18_veh-50_00211_00304.db")
+ # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.09.16.15.12.03_veh-42_01037_01434.db")
+ # log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
+
+ # for lidar_pc in log_db.lidar_pc: # Replace with actual token
+ # # camera_data = _extract_camera(log_db, lidar_pc, log_path)
+ # camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60)
+ # print(_extract_cameras("2013_05_28_drive_0000_sync",0))
+ _extract_detections("2013_05_28_drive_0000_sync", 0)
\ No newline at end of file
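
The extrinsic chain exercised above reduces to plain 4x4 matrix composition; a minimal numpy illustration, with identity matrices standing in for the real poses:

    import numpy as np

    img_e2g = np.eye(4)  # image-time ego -> global (EgoPose nearest the image timestamp)
    g2e = np.eye(4)      # global -> lidar-time ego (inverse of the lidar ego pose)
    c2img_e = np.eye(4)  # camera -> image-time ego (static calibration)

    img_e2e = g2e @ img_e2g  # image-time ego expressed in the lidar-time ego frame
    c2e = img_e2e @ c2img_e  # camera -> lidar-time ego, as computed in the test above
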
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index b6e97d8c..c79ce0b2 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -9,27 +9,35 @@
import numpy as np
import datetime
import hashlib
+import xml.etree.ElementTree as ET
import pyarrow as pa
from PIL import Image
+
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex
from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters
+from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters, rear_axle_se3_to_center_se3
+from d123.common.geometry.base import StateSE3
from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.vector import Vector3DIndex
+from d123.common.geometry.vector import Vector3D, Vector3DIndex
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+
KITTI360_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = True
KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
#TODO camera mismatch
-KITTI360_CAMERA_TYPES: Final[Dict[CameraType, str]] = {
+KITTI360_CAMERA_TYPES = {
CameraType.CAM_L0: "image_00",
CameraType.CAM_R0: "image_01",
# TODO fisheye camera
@@ -63,6 +71,16 @@
# DIR_POSES: PATH_POSES_ROOT,
}
+#TODO extend this mapping to the remaining KITTI-360 classes
+KITTI360_DETECTION_NAME_DICT = {
+ "truck": DetectionType.VEHICLE,
+ "bus": DetectionType.VEHICLE,
+ "car": DetectionType.VEHICLE,
+ "motorcycle": DetectionType.BICYCLE,
+ "bicycle": DetectionType.BICYCLE,
+ "pedestrian": DetectionType.PEDESTRIAN,
+}
+
def create_token(input_data: str) -> str:
# TODO: Refactor this function.
@@ -73,43 +91,6 @@ def create_token(input_data: str) -> str:
hash_obj = hashlib.sha256(input_data)
return hash_obj.hexdigest()[:16]
-def _load_calibration() -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
-    """
-    Read the KITTI-360 global calibration files and return:
-      - intrinsics[image_02] = 3x3 intrinsic matrix
-      - c2e[image_02] = 4x4 (camera -> ego/body); cam_to_pose is treated as camera -> vehicle (simplified)
-    """
- calib_dir = KITTI360_DATA_ROOT / DIR_CALIB
- intrinsics: Dict[str, np.ndarray] = {}
- c2e: Dict[str, np.ndarray] = {}
-
-    # Intrinsics: the P_rect_0{0..3} entries in perspective.txt
- persp = calib_dir / "perspective.txt"
- if persp.exists():
- with open(persp, "r") as f:
- lines = [ln.strip() for ln in f if ln.strip()]
- for ln in lines:
- if ln.startswith("P_rect_02"):
- intrinsics["image_02"] = _read_projection_matrix(ln)
- elif ln.startswith("P_rect_03"):
- intrinsics["image_03"] = _read_projection_matrix(ln)
-
-    # Extrinsics: Tr_cam02 in cam_to_pose.txt (camera to vehicle/pose)
- c2p = calib_dir / "cam_to_pose.txt"
- if c2p.exists():
- with open(c2p, "r") as f:
- lines = [ln.strip() for ln in f if ln.strip()]
- for ln in lines:
- if ln.startswith("Tr_cam02"):
- vals = [float(x) for x in ln.split(":")[1].strip().split()]
- T = np.array(vals, dtype=np.float64).reshape(4, 4)
- c2e["image_02"] = T
- elif ln.startswith("Tr_cam03"):
- vals = [float(x) for x in ln.split(":")[1].strip().split()]
- T = np.array(vals, dtype=np.float64).reshape(4, 4)
- c2e["image_03"] = T
-
- return intrinsics, c2e
class Kitti360DataConverter(RawDataConverter):
def __init__(
@@ -204,6 +185,19 @@ def convert_kitti360_log_to_arrow(
if not log_file_path.parent.exists():
log_file_path.parent.mkdir(parents=True, exist_ok=True)
+ metadata = LogMetadata(
+ dataset="kitti360",
+ log_name=log_name,
+ location="None",
+ timestep_seconds=KITTI360_DT,
+ map_has_z=False,
+ )
+
+ vehicle_parameters = get_kitti360_station_wagon_parameters()
+ camera_metadata = get_kitti360_camera_metadata()
+    # TODO: currently only the Velodyne lidar is converted
+ lidar_metadata = get_kitti360_lidar_metadata()
+
schema_column_list = [
("token", pa.string()),
("timestamp", pa.int64()),
@@ -218,38 +212,29 @@ def convert_kitti360_log_to_arrow(
("route_lane_group_ids", pa.list_(pa.int64())),
]
if data_converter_config.lidar_store_option is not None:
- if data_converter_config.lidar_store_option == "path":
- schema_column_list.append(("lidar", pa.string()))
- elif data_converter_config.lidar_store_option == "binary":
- raise NotImplementedError("Binary lidar storage is not implemented.")
+ for lidar_type in lidar_metadata.keys():
+ if data_converter_config.lidar_store_option == "path":
+ schema_column_list.append((lidar_type.serialize(), pa.string()))
+ elif data_converter_config.lidar_store_option == "binary":
+ raise NotImplementedError("Binary lidar storage is not implemented.")
- # TODO: Adjust how cameras are added
if data_converter_config.camera_store_option is not None:
- for cam_type in KITTI360_CAMERA_TYPES.keys():
+ for camera_type in camera_metadata.keys():
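+                # per camera: one image-path column plus one 16-float (row-major 4x4)
+                # extrinsic column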
if data_converter_config.camera_store_option == "path":
- schema_column_list.append((cam_type.serialize(), pa.string()))
- schema_column_list.append((f"{cam_type.serialize()}_extrinsic", pa.list_(pa.float64(), 16)))
+ schema_column_list.append((camera_type.serialize(), pa.string()))
+ schema_column_list.append(
+ (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))
+ )
elif data_converter_config.camera_store_option == "binary":
raise NotImplementedError("Binary camera storage is not implemented.")
recording_schema = pa.schema(schema_column_list)
- #TODO location
- metadata = LogMetadata(
- dataset="kitti360",
- log_name=log_name,
- location="None",
- timestep_seconds=KITTI360_DT,
- map_has_z=False,
- )
-
- #TODO vehicle parameters
- vehicle_parameters = get_kitti360_station_wagon_parameters()
- camera_metadata = get_kitti360_camera_metadata()
recording_schema = recording_schema.with_metadata(
{
"log_metadata": json.dumps(asdict(metadata)),
"vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
"camera_metadata": camera_metadata_dict_to_json(camera_metadata),
+ "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata),
}
)
@@ -298,6 +283,35 @@ def _read_projection_matrix(p_line: str) -> np.ndarray:
K = P[:, :3]
return K
+def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
+ metadata: Dict[LiDARType, LiDARMetadata] = {}
+
+ cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+ if not cam2pose_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+ cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt"
+ if not cam2velo_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}")
+
+ lastrow = np.array([0,0,0,1]).reshape(1,4)
+
+ with open(cam2pose_txt, 'r') as f:
+ image_00 = next(f)
+ values = list(map(float, image_00.strip().split()[1:]))
+ matrix = np.array(values).reshape(3, 4)
+ cam2pose = np.concatenate((matrix, lastrow))
+
+ cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
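+    # Assuming the calib files store camera->pose and camera->velodyne transforms (as
+    # their names suggest), cam2velo @ inv(cam2pose) composes vehicle(pose)->velodyne,
+    # which is stored below as the lidar extrinsic.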
+ extrinsic = cam2velo @ np.linalg.inv(cam2pose)
+
+ metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
+ lidar_type=LiDARType.LIDAR_TOP,
+ lidar_index=Kitti360LidarIndex,
+ extrinsic=extrinsic,
+ )
+ return metadata
+
def _write_recording_table(
log_name: str,
recording_schema: pa.Schema,
@@ -306,31 +320,33 @@ def _write_recording_table(
) -> None:
ts_list = _read_timestamps(log_name)
+ ego_state_all = _extract_ego_state_all(log_name)
+    detections_states, detections_velocity, detections_tokens, detections_types = _extract_detections(log_name, len(ts_list))
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
- for i, tp in enumerate(ts_list):
+ for idx, tp in enumerate(ts_list):
+
row_data = {
- "token": [create_token(f"{log_name}_{i}")],
+ "token": [create_token(f"{log_name}_{idx}")],
"timestamp": [tp.time_us],
- "detections_state": [],
- "detections_velocity": [],
- "detections_token": [],
- "detections_type": [],
- "ego_states": [],
- "traffic_light_ids": [],
- "traffic_light_types": [],
- "scenario_tag": [],
- "route_lane_group_ids": [],
+ "detections_state": [detections_states[idx]],
+ "detections_velocity": [detections_velocity[idx]],
+ "detections_token": [detections_tokens[idx]],
+ "detections_type": [detections_types[idx]],
+ "ego_states": [ego_state_all[idx]],
+ "traffic_light_ids": [[]],
+                    # TODO (maybe): populate traffic light types
+ "traffic_light_types": [[]],
+ "scenario_tag": [['unknown']],
+ "route_lane_group_ids": [[]],
}
if data_converter_config.lidar_store_option is not None:
- row_data["lidar"] = []
- # row_data["lidar"] = [_extract_lidar(log_name, data_converter_config)]
+                    lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config)
+                    if lidar_data_dict is not None:
+                        for lidar_type, lidar_data in lidar_data_dict.items():
+                            row_data[lidar_type.serialize()] = [lidar_data]
if data_converter_config.camera_store_option is not None:
- # camera_data_dict = _extract_camera(log_db, lidar_pc, source_log_path, data_converter_config)
- camera_data_dict = {}
+ camera_data_dict = _extract_cameras(log_name, idx, data_converter_config)
for camera_type, camera_data in camera_data_dict.items():
if camera_data is not None:
row_data[camera_type.serialize()] = [camera_data[0]]
@@ -348,9 +364,8 @@ def _write_recording_table(
write_arrow_table(recording_table, log_file_path)
#TODO default timestamps
-# If timestamps are not provided, we can generate them based on the KITTI-360 DT
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
-
+    # timestamps.txt stores absolute datetimes; they are parsed into Unix-epoch TimePoints
ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
if ts_file.exists():
tps: List[TimePoint] = []
@@ -375,82 +390,160 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
return tps
return None
+def _extract_ego_state_all(log_name: str) -> List[List[float]]:
+
+ ego_state_all: List[List[float]] = []
+
+ pose_file = PATH_POSES_ROOT / log_name / "poses.txt"
+ if not pose_file.exists():
+ raise FileNotFoundError(f"Pose file not found: {pose_file}")
+ poses = np.loadtxt(pose_file)
+ poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
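+    # poses.txt rows are assumed to be [frame_idx, row-major flattened 3x4 rigid
+    # transform], so columns 4 / 8 / 12 hold the x / y / z translation read below.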
+
+    # TODO: replace this hardcoded OXTS path with a dataset-root-based path
+ oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
+
+ for idx in range(len(list(oxts_path.glob("*.txt")))):
+ oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
+ oxts_data = np.loadtxt(oxts_path_file)
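+        # OXTS record layout (KITTI convention, assumed here): [3:6] = roll/pitch/yaw,
+        # [8:11] = vf/vl/vu, [14:17] = af/al/au, [20:23] = wf/wl/wu (vehicle frame).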
+
+ roll, pitch, yaw = oxts_data[3:6]
+ vehicle_parameters = get_kitti360_station_wagon_parameters()
+
+ pos = np.searchsorted(poses_time, idx, side='right') - 1
+
+ rear_axle_pose = StateSE3(
+ x=poses[pos, 4],
+ y=poses[pos, 8],
+ z=poses[pos, 12],
+ roll=roll,
+ pitch=pitch,
+ yaw=yaw,
+ )
+        # NOTE: The height to the rear axle is not provided by the dataset and is merely approximated.
+ center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
+ dynamic_state = DynamicStateSE3(
+ velocity=Vector3D(
+ x=oxts_data[8],
+ y=oxts_data[9],
+ z=oxts_data[10],
+ ),
+ acceleration=Vector3D(
+ x=oxts_data[14],
+ y=oxts_data[15],
+ z=oxts_data[16],
+ ),
+ angular_velocity=Vector3D(
+ x=oxts_data[20],
+ y=oxts_data[21],
+ z=oxts_data[22],
+ ),
+ )
+ ego_state_all.append(
+ EgoStateSE3(
+ center_se3=center,
+ dynamic_state_se3=dynamic_state,
+ vehicle_parameters=vehicle_parameters,
+ timepoint=None,
+ ).array.tolist()
+ )
+ return ego_state_all
+
+# TODO: detections are currently taken only from data_3d_semantics; we may later
+# distinguish image- vs. lidar-based detections. For now, an object's validity is
+# based solely on its annotated start/end frame range.
+def _extract_detections(
+    log_name: str,
+    ts_len: int,
+) -> Tuple[List[List[List[float]]], List[List[List[float]]], List[List[str]], List[List[int]]]:
+
+ detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
+ detections_velocity: List[List[List[float]]] = [[] for _ in range(ts_len)]
+ detections_tokens: List[List[str]] = [[] for _ in range(ts_len)]
+ detections_types: List[List[int]] = [[] for _ in range(ts_len)]
+
+ bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
+ if not bbox_3d_path.exists():
+ raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}")
+
+ tree = ET.parse(bbox_3d_path)
+ root = tree.getroot()
+
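+    # Each child of the root is one annotated object: static objects span a frame
+    # range (timestamp == -1, start_frame..end_frame), dynamic objects carry a
+    # single per-frame timestamp.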
+ for child in root:
+ label = child.find('label').text
+        if child.find('transform') is None or label not in KITTI360_DETECTION_NAME_DICT:
+ continue
+ obj = KITTI360Bbox3D()
+ obj.parseBbox(child)
+
+ # static
+ if obj.timestamp == -1:
+ start_frame = obj.start_frame
+ end_frame = obj.end_frame
+ for frame in range(start_frame, end_frame + 1):
+ #TODO check if valid in each frame
+ if frame < 0 or frame >= ts_len:
+ continue
+ #TODO check yaw
+ detections_states[frame].append(obj.get_state_array())
+ detections_velocity[frame].append([0.0, 0.0, 0.0])
+ detections_tokens[frame].append(str(obj.globalID))
+                detections_types[frame].append(int(KITTI360_DETECTION_NAME_DICT[label]))
+ # dynamic
+ else:
+            frame = obj.timestamp
+            if frame < 0 or frame >= ts_len:
+                continue
+ detections_states[frame].append(obj.get_state_array())
+ #TODO velocity not provided
+ detections_velocity[frame].append([0.0, 0.0, 0.0])
+ detections_tokens[frame].append(str(obj.globalID))
+            detections_types[frame].append(int(KITTI360_DETECTION_NAME_DICT[label]))
+
+ return detections_states, detections_velocity, detections_tokens, detections_types
+
#TODO lidar extraction
-def _extract_lidar(log_name: str, data_converter_config: DataConverterConfig) -> Optional[str]:
+def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Optional[Dict[LiDARType, str]]:
lidar: Optional[str] = None
- lidar_full_path = DIR_3D_SMT / "train" / log_name / "0000000002_0000000385.ply"
+ lidar_full_path = DIR_3D_RAW / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
if lidar_full_path.exists():
if data_converter_config.lidar_store_option == "path":
- lidar = f"{log_name}/lidar/{sample_name}.npy"
+ lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
elif data_converter_config.lidar_store_option == "binary":
raise NotImplementedError("Binary lidar storage is not implemented.")
else:
raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
- return lidar
-
-def _extract_camera():
- pass
-
-
-
-# for idx in range(n_frames):
-# token = f"{seq_name}_{idx:06d}"
-# t_us = ts_list[idx].time_us
-
-# row = {
-# "token": [token],
-# "timestamp": [t_us],
-#                 # placeholders for now; replace with real annotations later
-# "detections_state": [[]],
-# "detections_velocity": [[]],
-# "detections_token": [[]],
-# "detections_type": [[]],
-#                 "ego_states": [([0.0] * len(EgoStateSE3Index))],  # placeholder
-# "traffic_light_ids": [[]],
-# "traffic_light_types": [[]],
-# "scenario_tag": [["unknown"]],
-# "route_lane_group_ids": [[]],
-# }
-
-#             # lidar path (if present)
-# if data_converter_config.lidar_store_option is not None:
-# # velodyne bin:KITTI-360/data_3d_raw//velodyne_points/data/0000000000.bin
-# velodyne_dir = (
-# KITTI360_DATA_ROOT / DIR_3D / seq_name / "velodyne_points" / "data"
-# )
-#             # file names may be zero-padded to 10 digits; try several widths
-# bin_path = None
-# for fmt in [f"{idx:010d}.bin", f"{idx:06d}.bin", f"{idx:08d}.bin"]:
-# cand = velodyne_dir / fmt
-# if cand.exists():
-# bin_path = cand
-# break
-# row["lidar"] = [str(bin_path.relative_to(KITTI360_DATA_ROOT)) if bin_path else None]
-
-#             # camera paths and extrinsics
-# if data_converter_config.camera_store_option is not None:
-# for cam_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
-# img_dir = seq_dir_2d / cam_dir_name / "data"
-#                 # try several file-name widths
-# img_path = None
-# for ext in (".png", ".jpg", ".jpeg"):
-# for fmt in [f"{idx:010d}{ext}", f"{idx:06d}{ext}", f"{idx:08d}{ext}"]:
-# cand = img_dir / fmt
-# if cand.exists():
-# img_path = cand
-# break
-# if img_path:
-# break
-# if img_path is not None:
-# rel = str(img_path.relative_to(KITTI360_DATA_ROOT))
-# row[cam_type.serialize()] = [rel]
-#                     # extrinsic: fixed cam->ego from the global calibration, constant across frames (replace here if rolling shutter / per-frame pose is needed)
-# T = c2e.get(KITTI360_CAMERA_TYPES[cam_type], np.eye(4, dtype=np.float64))
-# row[f"{cam_type.serialize()}_extrinsic"] = [T.astype(np.float64).reshape(-1).tolist()]
-# else:
-# row[cam_type.serialize()] = [None]
-# row[f"{cam_type.serialize()}_extrinsic"] = [None]
-
-# batch = pa.record_batch(row, schema=recording_schema)
-# writer.write_batch(batch)
-# del batch, row
\ No newline at end of file
+ return {LiDARType.LIDAR_TOP: lidar} if lidar else None
+
+# TODO: verify the camera extrinsic; it is currently the raw camera-to-pose transform
+def _extract_cameras(
+    log_name: str, idx: int, data_converter_config: DataConverterConfig
+) -> Dict[CameraType, Tuple[Union[str, bytes], List[float]]]:
+
+    camera_dict: Dict[CameraType, Tuple[Union[str, bytes], List[float]]] = {}
+ for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
+ img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png"
+ if img_path_png.exists():
+
+ cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+ if not cam2pose_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+ lastrow = np.array([0,0,0,1]).reshape(1,4)
+
+ with open(cam2pose_txt, 'r') as f:
+ for line in f:
+ parts = line.strip().split()
+ key = parts[0][:-1]
+ if key == cam_dir_name:
+ values = list(map(float, parts[1:]))
+ matrix = np.array(values).reshape(3, 4)
+ cam2pose = np.concatenate((matrix, lastrow))
+
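+            # The extrinsic stored next to the image path is this raw camera->pose 4x4,
+            # flattened row-major into the 16-float <camera>_extrinsic schema column.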
+            if data_converter_config.camera_store_option == "path":
+                camera_data = (str(img_path_png), cam2pose.flatten().tolist())
+            elif data_converter_config.camera_store_option == "binary":
+                with open(img_path_png, "rb") as f:
+                    camera_data = (f.read(), cam2pose.flatten().tolist())
+        else:
+            raise FileNotFoundError(f"Camera image not found: {img_path_png}")
+        camera_dict[camera_type] = camera_data
+ return camera_dict
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
new file mode 100644
index 00000000..da79cf3e
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
+
+from scipy.linalg import polar
+from scipy.spatial.transform import Rotation as R
+
+from d123.common.geometry.base import StateSE3
+from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
+
+DEFAULT_ROLL = 0.0
+DEFAULT_PITCH = 0.0
+
+MAX_N = 1000
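+# globalId packs (semanticId, instanceId) into one integer,
+# e.g. semanticId=26 (car) and instanceId=7 -> globalId = 26*1000 + 7 = 26007.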
+def local2global(semanticId, instanceId):
+ globalId = semanticId*MAX_N + instanceId
+ if isinstance(globalId, np.ndarray):
+ return globalId.astype(np.int32)
+ else:
+ return int(globalId)
+
+def global2local(globalId):
+ semanticId = globalId // MAX_N
+ instanceId = globalId % MAX_N
+ if isinstance(globalId, np.ndarray):
+ return semanticId.astype(np.int32), instanceId.astype(np.int32)
+ else:
+ return int(semanticId), int(instanceId)
+
+class KITTI360Bbox3D:
+ # Constructor
+ def __init__(self):
+
+ # the ID of the corresponding object
+ self.semanticId = -1
+ self.instanceId = -1
+ self.annotationId = -1
+ self.globalID = -1
+
+ # the window that contains the bbox
+ self.start_frame = -1
+ self.end_frame = -1
+
+        # timestamp of the bbox (-1 if static)
+ self.timestamp = -1
+
+ # name
+ self.name = ''
+
+ def parseOpencvMatrix(self, node):
+ rows = int(node.find('rows').text)
+ cols = int(node.find('cols').text)
+ data = node.find('data').text.split(' ')
+
+ mat = []
+ for d in data:
+ d = d.replace('\n', '')
+            if len(d) < 1:
+ continue
+ mat.append(float(d))
+ mat = np.reshape(mat, [rows, cols])
+ return mat
+
+ def parseBbox(self, child):
+ semanticIdKITTI = int(child.find('semanticId').text)
+ self.semanticId = kittiId2label[semanticIdKITTI].id
+ self.instanceId = int(child.find('instanceId').text)
+ self.name = kittiId2label[semanticIdKITTI].name
+
+ self.start_frame = int(child.find('start_frame').text)
+ self.end_frame = int(child.find('end_frame').text)
+
+ self.timestamp = int(child.find('timestamp').text)
+
+ self.annotationId = int(child.find('index').text) + 1
+
+ self.globalID = local2global(self.semanticId, self.instanceId)
+ transform = self.parseOpencvMatrix(child.find('transform'))
+        self.R = transform[:3, :3]
+        self.T = transform[:3, 3]
+
+ def polar_decompose_rotation_scale(self):
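+        # scipy.linalg.polar returns (U, P) with self.R == U @ P, where U is the
+        # closest rotation matrix and P is symmetric positive semidefinite; the
+        # diagonal of P carries the per-axis scales of the annotated box.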
+ Rm, Sm = polar(self.R)
+ scale = np.diag(Sm)
+ yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+
+ return scale, (yaw, pitch, roll)
+
+    def get_state_array(self):
+        # yaw is taken from the annotation; roll/pitch are approximated by the module defaults
+        scale, (yaw, _pitch, _roll) = self.polar_decompose_rotation_scale()
+ center = StateSE3(
+ x=self.T[0],
+ y=self.T[1],
+ z=self.T[2],
+ roll=DEFAULT_ROLL,
+ pitch=DEFAULT_PITCH,
+ yaw=yaw,
+ )
+ bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2])
+
+ return bounding_box_se3.array
\ No newline at end of file
diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py
new file mode 100644
index 00000000..38f8a91c
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/labels.py
@@ -0,0 +1,168 @@
+#!/usr/bin/python
+#
+# KITTI-360 labels
+#
+
+from collections import namedtuple
+
+
+#--------------------------------------------------------------------------------
+# Definitions
+#--------------------------------------------------------------------------------
+
+# a label and all meta information
+Label = namedtuple( 'Label' , [
+
+ 'name' , # The identifier of this label, e.g. 'car', 'person', ... .
+ # We use them to uniquely name a class
+
+ 'id' , # An integer ID that is associated with this label.
+ # The IDs are used to represent the label in ground truth images
+ # An ID of -1 means that this label does not have an ID and thus
+ # is ignored when creating ground truth images (e.g. license plate).
+ # Do not modify these IDs, since exactly these IDs are expected by the
+ # evaluation server.
+
+ 'kittiId' , # An integer ID that is associated with this label for KITTI-360
+ # NOT FOR RELEASING
+
+ 'trainId' , # Feel free to modify these IDs as suitable for your method. Then create
+ # ground truth images with train IDs, using the tools provided in the
+ # 'preparation' folder. However, make sure to validate or submit results
+ # to our evaluation server using the regular IDs above!
+ # For trainIds, multiple labels might have the same ID. Then, these labels
+ # are mapped to the same class in the ground truth images. For the inverse
+ # mapping, we use the label that is defined first in the list below.
+ # For example, mapping all void-type classes to the same ID in training,
+ # might make sense for some approaches.
+ # Max value is 255!
+
+ 'category' , # The name of the category that this label belongs to
+
+ 'categoryId' , # The ID of this category. Used to create ground truth images
+ # on category level.
+
+ 'hasInstances', # Whether this label distinguishes between single instances or not
+
+ 'ignoreInEval', # Whether pixels having this class as ground truth label are ignored
+ # during evaluations or not
+
+ 'ignoreInInst', # Whether pixels having this class as ground truth label are ignored
+ # during evaluations of instance segmentation or not
+
+ 'color' , # The color of this label
+ ] )
+
+
+#--------------------------------------------------------------------------------
+# A list of all labels
+#--------------------------------------------------------------------------------
+
+# Please adapt the train IDs as appropriate for your approach.
+# Note that you might want to ignore labels with ID 255 during training.
+# Further note that the current train IDs are only a suggestion. You can use whatever you like.
+# Make sure to provide your results using the original IDs and not the training IDs.
+# Note that many IDs are ignored in evaluation and thus you never need to predict these!
+
+labels = [
+ # name id kittiId, trainId category catId hasInstances ignoreInEval ignoreInInst color
+ Label( 'unlabeled' , 0 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ),
+ Label( 'ego vehicle' , 1 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ),
+ Label( 'rectification border' , 2 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ),
+ Label( 'out of roi' , 3 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ),
+ Label( 'static' , 4 , -1 , 255 , 'void' , 0 , False , True , True , ( 0, 0, 0) ),
+ Label( 'dynamic' , 5 , -1 , 255 , 'void' , 0 , False , True , True , (111, 74, 0) ),
+ Label( 'ground' , 6 , -1 , 255 , 'void' , 0 , False , True , True , ( 81, 0, 81) ),
+ Label( 'road' , 7 , 1 , 0 , 'flat' , 1 , False , False , False , (128, 64,128) ),
+ Label( 'sidewalk' , 8 , 3 , 1 , 'flat' , 1 , False , False , False , (244, 35,232) ),
+ Label( 'parking' , 9 , 2 , 255 , 'flat' , 1 , False , True , True , (250,170,160) ),
+ Label( 'rail track' , 10 , 10, 255 , 'flat' , 1 , False , True , True , (230,150,140) ),
+ Label( 'building' , 11 , 11, 2 , 'construction' , 2 , True , False , False , ( 70, 70, 70) ),
+ Label( 'wall' , 12 , 7 , 3 , 'construction' , 2 , False , False , False , (102,102,156) ),
+ Label( 'fence' , 13 , 8 , 4 , 'construction' , 2 , False , False , False , (190,153,153) ),
+ Label( 'guard rail' , 14 , 30, 255 , 'construction' , 2 , False , True , True , (180,165,180) ),
+ Label( 'bridge' , 15 , 31, 255 , 'construction' , 2 , False , True , True , (150,100,100) ),
+ Label( 'tunnel' , 16 , 32, 255 , 'construction' , 2 , False , True , True , (150,120, 90) ),
+ Label( 'pole' , 17 , 21, 5 , 'object' , 3 , True , False , True , (153,153,153) ),
+ Label( 'polegroup' , 18 , -1 , 255 , 'object' , 3 , False , True , True , (153,153,153) ),
+ Label( 'traffic light' , 19 , 23, 6 , 'object' , 3 , True , False , True , (250,170, 30) ),
+ Label( 'traffic sign' , 20 , 24, 7 , 'object' , 3 , True , False , True , (220,220, 0) ),
+ Label( 'vegetation' , 21 , 5 , 8 , 'nature' , 4 , False , False , False , (107,142, 35) ),
+ Label( 'terrain' , 22 , 4 , 9 , 'nature' , 4 , False , False , False , (152,251,152) ),
+ Label( 'sky' , 23 , 9 , 10 , 'sky' , 5 , False , False , False , ( 70,130,180) ),
+ Label( 'person' , 24 , 19, 11 , 'human' , 6 , True , False , False , (220, 20, 60) ),
+ Label( 'rider' , 25 , 20, 12 , 'human' , 6 , True , False , False , (255, 0, 0) ),
+ Label( 'car' , 26 , 13, 13 , 'vehicle' , 7 , True , False , False , ( 0, 0,142) ),
+ Label( 'truck' , 27 , 14, 14 , 'vehicle' , 7 , True , False , False , ( 0, 0, 70) ),
+ Label( 'bus' , 28 , 34, 15 , 'vehicle' , 7 , True , False , False , ( 0, 60,100) ),
+ Label( 'caravan' , 29 , 16, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0, 90) ),
+ Label( 'trailer' , 30 , 15, 255 , 'vehicle' , 7 , True , True , True , ( 0, 0,110) ),
+ Label( 'train' , 31 , 33, 16 , 'vehicle' , 7 , True , False , False , ( 0, 80,100) ),
+ Label( 'motorcycle' , 32 , 17, 17 , 'vehicle' , 7 , True , False , False , ( 0, 0,230) ),
+ Label( 'bicycle' , 33 , 18, 18 , 'vehicle' , 7 , True , False , False , (119, 11, 32) ),
+ Label( 'garage' , 34 , 12, 2 , 'construction' , 2 , True , True , True , ( 64,128,128) ),
+ Label( 'gate' , 35 , 6 , 4 , 'construction' , 2 , False , True , True , (190,153,153) ),
+ Label( 'stop' , 36 , 29, 255 , 'construction' , 2 , True , True , True , (150,120, 90) ),
+ Label( 'smallpole' , 37 , 22, 5 , 'object' , 3 , True , True , True , (153,153,153) ),
+ Label( 'lamp' , 38 , 25, 255 , 'object' , 3 , True , True , True , (0, 64, 64) ),
+ Label( 'trash bin' , 39 , 26, 255 , 'object' , 3 , True , True , True , (0, 128,192) ),
+ Label( 'vending machine' , 40 , 27, 255 , 'object' , 3 , True , True , True , (128, 64, 0) ),
+ Label( 'box' , 41 , 28, 255 , 'object' , 3 , True , True , True , (64, 64,128) ),
+ Label( 'unknown construction' , 42 , 35, 255 , 'void' , 0 , False , True , True , (102, 0, 0) ),
+ Label( 'unknown vehicle' , 43 , 36, 255 , 'void' , 0 , False , True , True , ( 51, 0, 51) ),
+ Label( 'unknown object' , 44 , 37, 255 , 'void' , 0 , False , True , True , ( 32, 32, 32) ),
+ Label( 'license plate' , -1 , -1, -1 , 'vehicle' , 7 , False , True , True , ( 0, 0,142) ),
+]
+
+#--------------------------------------------------------------------------------
+# Create dictionaries for a fast lookup
+#--------------------------------------------------------------------------------
+
+# Example lookups (the upstream file's demo main() is not included here):
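+#   name2label['car'].id       -> 26
+#   id2label[26].name          -> 'car'
+#   kittiId2label[13].name     -> 'car'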
+
+# name to label object
+name2label = { label.name : label for label in labels }
+# id to label object
+id2label = { label.id : label for label in labels }
+# trainId to label object
+trainId2label = { label.trainId : label for label in reversed(labels) }
+# KITTI-360 ID to cityscapes ID
+kittiId2label = { label.kittiId : label for label in labels }
+# category to list of label objects
+category2labels = {}
+for label in labels:
+ category = label.category
+ if category in category2labels:
+ category2labels[category].append(label)
+ else:
+ category2labels[category] = [label]
+
+#--------------------------------------------------------------------------------
+# Assure single instance name
+#--------------------------------------------------------------------------------
+
+# returns the label name that describes a single instance (if possible)
+# e.g. input | output
+# ----------------------
+# car | car
+# cargroup | car
+# foo | None
+# foogroup | None
+# skygroup | None
+def assureSingleInstanceName( name ):
+ # if the name is known, it is not a group
+ if name in name2label:
+ return name
+ # test if the name actually denotes a group
+ if not name.endswith("group"):
+ return None
+ # remove group
+ name = name[:-len("group")]
+ # test if the new name exists
+ if not name in name2label:
+ return None
+ # test if the new name denotes a label that actually has instances
+ if not name2label[name].hasInstances:
+ return None
+ # all good then
+ return name
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index b844fa03..e1c76c60 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -17,11 +17,7 @@ defaults:
- datasets:
- kitti360_dataset
# - nuplan_private_dataset
-<<<<<<< HEAD
# - carla_dataset
-=======
- - carla_dataset
->>>>>>> dev_v0.0.6
# - wopd_dataset
force_log_conversion: True
diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml
index 418d36a4..17b9e863 100644
--- a/d123/script/config/datasets/kitti360_dataset.yaml
+++ b/d123/script/config/datasets/kitti360_dataset.yaml
@@ -1,4 +1,4 @@
-nuplan_dataset:
+kitti360_dataset:
_target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
_convert_: 'all'
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
new file mode 100644
index 00000000..a505c4d2
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti360_test
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ nuplan_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: all
+ splits:
+ - kitti360
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
new file mode 100644
index 00000000..406ccbe7
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti360_test
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti360_test
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123/d123/script
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.31.57
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
new file mode 100644
index 00000000..6c8e6217
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti360_test
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/log.txt b/exp/kitti360_test/2025.08.15.14.31.57/log.txt
new file mode 100644
index 00000000..984f705a
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.31.57/log.txt
@@ -0,0 +1,10 @@
+2025-08-15 14:31:57,385 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:32:14,105 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:32:35,603 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:32:35,604 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:32:35,605 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
new file mode 100644
index 00000000..0fd6120d
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti360_test
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ kitti360_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: all
+ splits:
+ - kitti360
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
new file mode 100644
index 00000000..4eee2c65
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti360_test
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti360_test
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123/d123/script
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.36.40
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
new file mode 100644
index 00000000..6c8e6217
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti360_test
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/log.txt b/exp/kitti360_test/2025.08.15.14.36.40/log.txt
new file mode 100644
index 00000000..5f939dac
--- /dev/null
+++ b/exp/kitti360_test/2025.08.15.14.36.40/log.txt
@@ -0,0 +1,10 @@
+2025-08-15 14:36:40,989 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:36:56,167 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:37:18,685 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:37:18,686 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:37:18,687 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
new file mode 100644
index 00000000..5ce47ba9
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti_test2
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ kitti360_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: all
+ splits:
+ - kitti360
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
new file mode 100644
index 00000000..2d1c615a
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti_test2
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti_test2
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.40.29
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
new file mode 100644
index 00000000..676c1042
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/log.txt b/exp/kitti_test2/2025.08.15.14.40.29/log.txt
new file mode 100644
index 00000000..8437d38e
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.40.29/log.txt
@@ -0,0 +1,10 @@
+2025-08-15 14:40:29,427 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:40:42,538 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:41:00,324 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:41:00,325 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:41:00,326 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
new file mode 100644
index 00000000..de70bfa3
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti_test2
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ nuplan_private_dataset:
+ _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _convert_: all
+ splits:
+ - nuplan_private_test
+ log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
new file mode 100644
index 00000000..cca44d29
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti_test2
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti_test2
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.43.13
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
new file mode 100644
index 00000000..676c1042
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/log.txt b/exp/kitti_test2/2025.08.15.14.43.13/log.txt
new file mode 100644
index 00000000..fec50568
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.43.13/log.txt
@@ -0,0 +1,12 @@
+2025-08-15 14:43:13,965 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:43:24,401 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:43:39,643 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:43:39,644 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:43:39,645 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
+2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:21} Building RawDataProcessor...DONE!
+2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:34} Processing dataset: NuplanDataConverter
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
new file mode 100644
index 00000000..5ce47ba9
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti_test2
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ kitti360_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: all
+ splits:
+ - kitti360
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
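
# A minimal sketch of how Hydra turns the dataset entry in the config snapshot
# above into a live converter object. `config.yaml` is a placeholder path, and
# the ${oc.env:...} interpolations assume the referenced environment variables
# (KITTI360_DATA_ROOT, D123_DATA_ROOT, ...) are exported.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("config.yaml")  # e.g. the snapshot under code/hydra/
# Interpolations resolve lazily against the root config on access, so passing
# the sub-node keeps ${d123_data_root} and friends resolvable.
converter = instantiate(cfg.datasets.kitti360_dataset)
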
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
new file mode 100644
index 00000000..bd9698a2
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti_test2
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti_test2
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.46.49
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
new file mode 100644
index 00000000..676c1042
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/log.txt b/exp/kitti_test2/2025.08.15.14.46.49/log.txt
new file mode 100644
index 00000000..00286f48
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.46.49/log.txt
@@ -0,0 +1,10 @@
+2025-08-15 14:46:49,566 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:46:59,509 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:47:14,122 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
new file mode 100644
index 00000000..5ce47ba9
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti_test2
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ kitti360_dataset:
+ _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _convert_: all
+ splits:
+ - kitti360
+ log_path: ${oc.env:KITTI360_DATA_ROOT}
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
new file mode 100644
index 00000000..acff45d7
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti_test2
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti_test2
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.50.55
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
new file mode 100644
index 00000000..676c1042
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/log.txt b/exp/kitti_test2/2025.08.15.14.50.55/log.txt
new file mode 100644
index 00000000..9902e0ce
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.50.55/log.txt
@@ -0,0 +1,11 @@
+2025-08-15 14:50:55,950 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:51:19,466 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
+2025-08-15 14:51:52,655 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter', '_convert_': 'all', 'splits': ['kitti360'], 'log_path': '${oc.env:KITTI360_DATA_ROOT}', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}}
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
new file mode 100644
index 00000000..de70bfa3
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
@@ -0,0 +1,60 @@
+worker:
+ _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
+ _convert_: all
+ master_node_ip: null
+ threads_per_node: null
+ debug_mode: false
+ log_to_driver: true
+ logs_subdir: logs
+ use_distributed: false
+scene_filter:
+ _target_: d123.dataset.scene.scene_filter.SceneFilter
+ _convert_: all
+ split_types: null
+ split_names: null
+ log_names: null
+ map_names: null
+ scene_tokens: null
+ timestamp_threshold_s: null
+ ego_displacement_minimum_m: null
+ duration_s: 9.2
+ history_s: 3.0
+scene_builder:
+ _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+ _convert_: all
+ dataset_path: ${d123_data_root}
+distributed_timeout_seconds: 7200
+selected_simulation_metrics: null
+verbose: false
+logger_level: info
+logger_format_string: null
+max_number_of_workers: null
+gpu: true
+seed: 42
+d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
+d123_maps_root: ${oc.env:D123_MAPS_ROOT}
+d123_data_root: ${oc.env:D123_DATA_ROOT}
+nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
+nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
+nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
+experiment_name: kitti_test2
+date_format: '%Y.%m.%d.%H.%M.%S'
+experiment_uid: ${now:${date_format}}
+output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
+force_log_conversion: true
+force_map_conversion: false
+datasets:
+ nuplan_private_dataset:
+ _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _convert_: all
+ splits:
+ - nuplan_private_test
+ log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
+ data_converter_config:
+ _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _convert_: all
+ output_path: ${d123_data_root}
+ force_log_conversion: ${force_log_conversion}
+ force_map_conversion: ${force_map_conversion}
+ camera_store_option: path
+ lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
new file mode 100644
index 00000000..d053f8e7
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
@@ -0,0 +1,177 @@
+hydra:
+ run:
+ dir: ${output_dir}
+ sweep:
+ dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params: null
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: RUN
+ searchpath:
+ - pkg://d123.script.config
+ - pkg://d123.script.config.common
+ callbacks: {}
+ output_subdir: ${output_dir}/code/hydra
+ overrides:
+ hydra:
+ - hydra.mode=RUN
+ task:
+ - experiment_name=kitti_test2
+ job:
+ name: run_dataset_conversion
+ chdir: false
+ override_dirname: experiment_name=kitti_test2
+ id: ???
+ num: ???
+ config_name: default_dataset_conversion
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/jbwang/d123
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: /home/jbwang/d123/d123/script/config/dataset_conversion
+ schema: file
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: d123.script.config
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: d123.script.config.common
+ schema: pkg
+ provider: hydra.searchpath in main
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.52.39
+ choices:
+ scene_builder: default_scene_builder
+ scene_filter: all_scenes
+ worker: ray_distributed
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
new file mode 100644
index 00000000..676c1042
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
@@ -0,0 +1 @@
+- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/log.txt b/exp/kitti_test2/2025.08.15.14.52.39/log.txt
new file mode 100644
index 00000000..e2585299
--- /dev/null
+++ b/exp/kitti_test2/2025.08.15.14.52.39/log.txt
@@ -0,0 +1,11 @@
+2025-08-15 14:52:39,717 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
+2025-08-15 14:53:02,994 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
+2025-08-15 14:53:36,548 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
+2025-08-15 14:53:36,549 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
+Number of CPUs per node: 64
+Number of GPUs per node: 8
+Number of threads across all nodes: 64
+2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
+2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
+2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
+2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter', '_convert_': 'all', 'splits': ['nuplan_private_test'], 'log_path': '${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}}
diff --git a/jbwang_test.py b/jbwang_test.py
index ac3afac5..ff320df9 100644
--- a/jbwang_test.py
+++ b/jbwang_test.py
@@ -65,4 +65,21 @@
# log_name = "1230_asd_"
# for i in range(20):
# a = create_token(f"{log_name}_{i}")
-# print(a)
+# print(a)ee
+
+
+import numpy as np
+from pathlib import Path
+a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
+b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
+data = b
+ts = data[:, 0].astype(np.int32)
+poses = np.reshape(data[:, 1:], (-1, 3, 4))
+poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
+print(a)
+print(b.shape)
+print(ts.shape)
+print(poses.shape)
+
+ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
+print(len(list(ccc.glob("*.txt"))))
\ No newline at end of file
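
# The throwaway script above probes KITTI-360's poses.txt layout: each row is a
# frame index followed by a row-major, flattened 3x4 rigid transform. A minimal
# helper for the same decoding (the path argument is illustrative):
import numpy as np

def load_kitti360_poses(path: str):
    data = np.loadtxt(path)
    frame_ids = data[:, 0].astype(np.int64)
    poses = data[:, 1:].reshape(-1, 3, 4)
    # Promote each 3x4 matrix to a homogeneous 4x4 by appending [0, 0, 0, 1].
    bottom = np.tile(np.array([0.0, 0.0, 0.0, 1.0]).reshape(1, 1, 4), (poses.shape[0], 1, 1))
    return frame_ids, np.concatenate((poses, bottom), axis=1)
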
diff --git a/jbwang_test2.py b/jbwang_test2.py
new file mode 100644
index 00000000..b1229356
--- /dev/null
+++ b/jbwang_test2.py
@@ -0,0 +1,70 @@
+# import numpy as np
+# import pickle
+
+# # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
+# # a = np.fromfile(path, dtype=np.float32)
+
+# # print(a.shape)
+# # print(a[:10])
+
+# # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
+# # c = np.loadtxt(path2)
+# # print(c)
+
+# import open3d as o3d
+# import numpy as np
+
+# def read_ply_file(file_path):
+# # Read the PLY file
+# pcd = o3d.io.read_point_cloud(file_path)
+# print(len(pcd.points), len(pcd.colors))
+# # Extract vertex information
+# points = np.asarray(pcd.points) # x, y, z
+# colors = np.asarray(pcd.colors) # red, green, blue
+# # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence
+
+# # Merge all information into a single array
+# vertices = np.hstack((points, colors))
+
+# return vertices
+
+# # Example usage
+# file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # replace with your PLY file path
+# vertices = read_ply_file(file_path)
+
+# # Print the first few vertices
+# print("Vertex info (first 5 vertices):")
+# print(vertices[:5])
+
+import numpy as np
+from scipy.linalg import polar
+from scipy.spatial.transform import Rotation as R
+
+def polar_decompose_rotation_scale(A: np.ndarray):
+ """
+ A: 3x3 (rotation + scale + shear)
+ Returns:
+ Rm: pure rotation
+ Sm: symmetric positive definite (scale + shear)
+ scale: approximate per-axis scale (from the square roots of Sm's eigenvalues or its diagonal; treat with care if shear is present)
+ yaw,pitch,roll: ZYX sequence (typically yaw(Z), pitch(Y), roll(X))
+ """
+ Rm, Sm = polar(A) # A = Rm @ Sm
+ # Approximate per-axis scale (assuming no shear):
+ scale = np.diag(Sm)
+ # Euler angles
+ yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+ return {
+ "R": Rm,
+ "S": Sm,
+ "scale_diag": scale,
+ "yaw_pitch_roll": (yaw, pitch, roll),
+ }
+
+M = np.array([
+ [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02],
+ [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
+ [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00],
+])
+out = polar_decompose_rotation_scale(M)
+print(out)
\ No newline at end of file
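
# A quick sanity check that could follow the script above: for an input with
# positive determinant, polar() should give a proper rotation Rm (orthonormal,
# det +1) and the factors should reassemble the input, A = Rm @ Sm.
import numpy as np

Rm, Sm = out["R"], out["S"]
assert np.allclose(Rm @ Rm.T, np.eye(3), atol=1e-8)
assert np.isclose(np.linalg.det(Rm), 1.0)
assert np.allclose(Rm @ Sm, M)
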
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
index caaa3201..0996734b 100644
--- a/notebooks/dataset/jbwang_test.py
+++ b/notebooks/dataset/jbwang_test.py
@@ -1,5 +1,5 @@
-s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
-# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
+s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
@@ -33,10 +33,11 @@
for col in table.column_names:
if col == "lidar":
continue
- print(f"Column: {col}, Type: {table.schema.field(col).type}")
- tokens = table[col] # 或 table.column("token")
+ print(f"Column : {col}, Type: {table.schema.field(col).type}")
+ # tokens = table[col] # 或 table.column("token")
+ # print(tokens)
# print(len(tokens))
- print(tokens.slice(0, 4).to_pylist())
+ # print(tokens.slice(0, 100).to_pylist())
# print(table["traffic_light_ids"])
timer.log("3. Table created")
# Save locally
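
# For reference, a self-contained version of the inspection loop patched above:
# open an Arrow IPC file memory-mapped and print each column's type. The path
# is illustrative.
import pyarrow as pa

with pa.memory_map("/data/jbwang/d123/data/example.arrow", "r") as source:
    table = pa.ipc.open_file(source).read_all()
for col in table.column_names:
    print(f"Column: {col}, Type: {table.schema.field(col).type}")
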
From 5b8a2074e5e014536ae9408a4955ff167fcf536e Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Fri, 15 Aug 2025 15:00:52 +0800
Subject: [PATCH 003/145] delete exp/*
---
.gitignore | 1 +
1 file changed, 1 insertion(+)
diff --git a/.gitignore b/.gitignore
index 22cfdee9..0baa64c4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,3 +23,4 @@
*.csv
*.log
*.mp4
+exp/*
From 0db8c31aea69a814e66a003e6a970c6b776b1d17 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Fri, 15 Aug 2025 15:54:04 +0800
Subject: [PATCH 004/145] fix hydra and other bugs
---
.../kitti_360/kitti_360_data_converter.py | 30 +++++++++++--------
.../kitti_360/kitti_360_helper.py | 6 ++--
2 files changed, 20 insertions(+), 16 deletions(-)
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index c79ce0b2..02dc0add 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -28,8 +28,7 @@
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-
-from kitti_360_helper import KITTI360Bbox3D
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
KITTI360_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = True
@@ -62,11 +61,10 @@
PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
+#TODO check all paths
KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = {
DIR_2D_RAW: PATH_2D_RAW_ROOT,
- # DIR_2D_SMT: PATH_2D_SMT_ROOT,
- # DIR_3D_RAW: PATH_3D_RAW_ROOT,
- # DIR_3D_SMT: PATH_3D_SMT_ROOT,
+ DIR_3D_RAW: PATH_3D_RAW_ROOT,
# DIR_3D_BBOX: PATH_3D_BBOX_ROOT,
# DIR_POSES: PATH_POSES_ROOT,
}
@@ -138,6 +136,7 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]:
# f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
# f"Root: {KITTI360_DATA_ROOT}"
# )
+ print("valid",valid_seqs)
return {"kitti360": valid_seqs}
def get_available_splits(self) -> List[str]:
@@ -244,7 +243,7 @@ def convert_kitti360_log_to_arrow(
return []
-def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]:
+def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]:
persp = PATH_CALIB_ROOT / "perspective.txt"
@@ -265,7 +264,7 @@ def get_kitti360_camera_metadata() -> Dict[str, CameraMetadata]:
log_cam_infos: Dict[str, CameraMetadata] = {}
for cam_type, cam_name in KITTI360_CAMERA_TYPES.items():
- log_cam_infos[cam_type.serialize()] = CameraMetadata(
+ log_cam_infos[cam_type] = CameraMetadata(
camera_type=cam_type,
width=result[cam_name]["wh"][0],
height=result[cam_name]["wh"][1],
@@ -283,7 +282,7 @@ def _read_projection_matrix(p_line: str) -> np.ndarray:
K = P[:, :3]
return K
-def get_kitti360_lidar_metadata(log_name: str) -> Dict[LiDARType, LiDARMetadata]:
+def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
metadata: Dict[LiDARType, LiDARMetadata] = {}
cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
@@ -343,7 +342,12 @@ def _write_recording_table(
}
if data_converter_config.lidar_store_option is not None:
- row_data["lidar"] = [_extract_lidar(log_name, idx, data_converter_config)]
+ lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config)
+ for lidar_type, lidar_data in lidar_data_dict.items():
+ if lidar_data is not None:
+ row_data[lidar_type.serialize()] = [lidar_data]
+ else:
+ row_data[lidar_type.serialize()] = [None]
if data_converter_config.camera_store_option is not None:
camera_data_dict = _extract_cameras(log_name, idx, data_converter_config)
@@ -363,7 +367,7 @@ def _write_recording_table(
recording_table = recording_table.sort_by([("timestamp", "ascending")])
write_arrow_table(recording_table, log_file_path)
-#TODO default timestamps
+#TODO default timestamps and synchronization of all other parts
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
# unix
ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
@@ -501,9 +505,9 @@ def _extract_detections(
return detections_states, detections_velocity, detections_tokens, detections_types
#TODO lidar extraction
-def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Optional[str]:
+def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
lidar: Optional[str] = None
- lidar_full_path = DIR_3D_RAW / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
+ lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
if lidar_full_path.exists():
if data_converter_config.lidar_store_option == "path":
lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
@@ -511,7 +515,7 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter
raise NotImplementedError("Binary lidar storage is not implemented.")
else:
raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
- return {LiDARType.LIDAR_TOP: lidar} if lidar else None
+ return {LiDARType.LIDAR_TOP: lidar}
#TODO check camera extrinsic now is from camera to pose
def _extract_cameras(
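
# The path stored above points at raw KITTI-360 velodyne frames. Each .bin file
# is a flat float32 buffer of (x, y, z, reflectance) quadruples, so a minimal
# reader (a sketch, mirroring the layout the converter assumes) looks like:
import numpy as np

def read_velodyne_bin(path: str) -> np.ndarray:
    points = np.fromfile(path, dtype=np.float32).reshape(-1, 4)
    return points[:, :3]  # drop reflectance, keep x, y, z
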
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index da79cf3e..c86d9604 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -1,13 +1,13 @@
import numpy as np
from collections import defaultdict
-from labels import kittiId2label
from scipy.linalg import polar
from scipy.spatial.transform import Rotation as R
from d123.common.geometry.base import StateSE3
from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
DEFAULT_ROLL = 0.0
DEFAULT_PITCH = 0.0
@@ -93,8 +93,8 @@ def get_state_array(self):
x=self.T[0],
y=self.T[1],
z=self.T[2],
- roll=DEFAULT_ROLL,
- pitch=DEFAULT_PITCH,
+ roll=roll,
+ pitch=pitch,
yaw=yaw,
)
bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2])
From 2d129ee0df6980d219e529a57a60ba6e487a47d6 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Sun, 17 Aug 2025 12:27:05 +0800
Subject: [PATCH 005/145] add pose calibration to align with nuplan
---
d123/script/run_viser.py | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py
index e682a96e..e977c669 100644
--- a/d123/script/run_viser.py
+++ b/d123/script/run_viser.py
@@ -19,10 +19,13 @@ def main(cfg: DictConfig) -> None:
worker = build_worker(cfg)
scene_filter = build_scene_filter(cfg.scene_filter)
+ logger.info(f"Scene filter: {scene_filter}")
+ logger.info(f"Using {cfg.scene_builder}")
+ scene_filter.duration_s = 50
scene_builder = build_scene_builder(cfg.scene_builder)
scenes = scene_builder.get_scenes(scene_filter, worker=worker)
-
- ViserVisualizationServer(scenes=scenes)
+ logger.info(f"Found {len(scenes)} scenes.")
+ ViserVisualizationServer(scenes=scenes, scene_index=0)
if __name__ == "__main__":
From 7dd70e6e0713628cb8cf19c9c282814ca04d2e5b Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Sun, 17 Aug 2025 12:30:07 +0800
Subject: [PATCH 006/145] add pose calibration to align with nuplan
---
d123/dataset/arrow/conversion.py | 1 +
.../kitti_360/kitti_360_data_converter.py | 55 ++++--
jbwang_test.py | 43 +++--
jbwang_test2.py | 162 +++++++++++-------
notebooks/dataset/jbwang_test.py | 7 +-
5 files changed, 172 insertions(+), 96 deletions(-)
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index d9afba6f..69488545 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -33,6 +33,7 @@
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
"carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
+ "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]),
}
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 02dc0add..7e13b905 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -12,6 +12,7 @@
import xml.etree.ElementTree as ET
import pyarrow as pa
from PIL import Image
+import logging
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
@@ -61,12 +62,11 @@
PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
-#TODO check all paths
KITTI360_REQUIRED_MODALITY_ROOTS: Dict[str, Path] = {
DIR_2D_RAW: PATH_2D_RAW_ROOT,
DIR_3D_RAW: PATH_3D_RAW_ROOT,
- # DIR_3D_BBOX: PATH_3D_BBOX_ROOT,
- # DIR_POSES: PATH_POSES_ROOT,
+ DIR_POSES: PATH_POSES_ROOT,
+ DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train",
}
#TODO
@@ -79,6 +79,20 @@
"pedestrian": DetectionType.PEDESTRIAN,
}
+KITTI3602NUPLAN_IMU_CALIBRATION = np.array([
+ [1, 0, 0, 0],
+ [0, -1, 0, 0],
+ [0, 0, -1, 0],
+ [0, 0, 0, 1],
+ ], dtype=np.float64)
+
+KITTI3602NUPLAN_LIDAR_CALIBRATION = np.array([
+ [0, -1, 0, 0],
+ [1, 0, 0, 0],
+ [0, 0, 1, 0],
+ [0, 0, 0, 1],
+ ], dtype=np.float64)
+
def create_token(input_data: str) -> str:
# TODO: Refactor this function.
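
# What the two constant matrices above encode, assuming homogeneous 4x4
# extrinsics: the IMU calibration is a 180-degree rotation about x (it flips y
# and z), and the LiDAR calibration is a 90-degree rotation about z, presumably
# mapping KITTI-360's axis conventions onto nuPlan's. They are applied by left
# multiplication, e.g.:
import numpy as np

imu_cal = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=np.float64)
cam2pose = np.eye(4)  # placeholder camera->IMU extrinsic read from calibration
cam2pose_nuplan = imu_cal @ cam2pose  # re-express in the flipped pose frame
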
@@ -120,23 +134,30 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]:
# Enumerate candidate sequences from data_2d_raw
candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
+ def _has_modality(seq_name: str, modality_name: str, root: Path) -> bool:
+ if modality_name == DIR_3D_BBOX:
+ # expected: data_3d_bboxes/train/<seq_name>.xml
+ xml_path = root / f"{seq_name}.xml"
+ return xml_path.exists()
+ else:
+ return (root / seq_name).exists()
+
valid_seqs: List[Path] = []
for seq_dir in candidates:
seq_name = seq_dir.name
missing_modalities = [
modality_name
for modality_name, root in KITTI360_REQUIRED_MODALITY_ROOTS.items()
- if not (root / seq_name).exists()
+ if not _has_modality(seq_name, modality_name, root)
]
if not missing_modalities:
valid_seqs.append(seq_dir) #KITTI360_DATA_ROOT / DIR_2D_RAW /seq_name
- #TODO warnings
- # else:
- # warnings.warn(
- # f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
- # f"Root: {KITTI360_DATA_ROOT}"
- # )
- print("valid",valid_seqs)
+ else:
+ logging.info(
+ f"Sequence '{seq_name}' skipped: missing modalities {missing_modalities}. "
+ f"Root: {KITTI360_DATA_ROOT}"
+ )
+ logging.info(f"vadid sequences found: {valid_seqs}")
return {"kitti360": valid_seqs}
def get_available_splits(self) -> List[str]:
@@ -144,7 +165,7 @@ def get_available_splits(self) -> List[str]:
return ["kitti360"]
def convert_maps(self, worker: WorkerPool) -> None:
- print("KITTI-360 does not provide standard maps. Skipping map conversion.")
+ logging.info("KITTI-360 does not provide standard maps. Skipping map conversion.")
return None
def convert_logs(self, worker: WorkerPool) -> None:
@@ -184,6 +205,7 @@ def convert_kitti360_log_to_arrow(
if not log_file_path.parent.exists():
log_file_path.parent.mkdir(parents=True, exist_ok=True)
+ #TODO location
metadata = LogMetadata(
dataset="kitti360",
log_name=log_name,
@@ -300,13 +322,17 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
values = list(map(float, image_00.strip().split()[1:]))
matrix = np.array(values).reshape(3, 4)
cam2pose = np.concatenate((matrix, lastrow))
+ cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
+ cam2velo = KITTI3602NUPLAN_LIDAR_CALIBRATION @ cam2velo
+
extrinsic = cam2velo @ np.linalg.inv(cam2pose)
metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
lidar_type=LiDARType.LIDAR_TOP,
lidar_index=Kitti360LidarIndex,
+ #TODO extrinsic needed to be same with nuplan
extrinsic=extrinsic,
)
return metadata
@@ -367,7 +393,7 @@ def _write_recording_table(
recording_table = recording_table.sort_by([("timestamp", "ascending")])
write_arrow_table(recording_table, log_file_path)
-#TODO default timestamps and synchronization of all other parts
+#TODO default timestamps and synchronization of all other sequences
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
# unix
ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
@@ -504,7 +530,7 @@ def _extract_detections(
return detections_states, detections_velocity, detections_tokens, detections_types
-#TODO lidar extraction
+#TODO lidar extraction now only velo
def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
lidar: Optional[str] = None
lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
@@ -541,6 +567,7 @@ def _extract_cameras(
values = list(map(float, parts[1:]))
matrix = np.array(values).reshape(3, 4)
cam2pose = np.concatenate((matrix, lastrow))
+ cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
if data_converter_config.camera_store_option == "path":
camera_data = str(img_path_png), cam2pose.flatten().tolist()
diff --git a/jbwang_test.py b/jbwang_test.py
index ff320df9..e42f512a 100644
--- a/jbwang_test.py
+++ b/jbwang_test.py
@@ -68,18 +68,31 @@
# print(a)ee
-import numpy as np
-from pathlib import Path
-a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
-b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
-data = b
-ts = data[:, 0].astype(np.int32)
-poses = np.reshape(data[:, 1:], (-1, 3, 4))
-poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
-print(a)
-print(b.shape)
-print(ts.shape)
-print(poses.shape)
-
-ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
-print(len(list(ccc.glob("*.txt"))))
\ No newline at end of file
+# import numpy as np
+# from pathlib import Path
+# a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
+# b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
+# data = b
+# ts = data[:, 0].astype(np.int32)
+# poses = np.reshape(data[:, 1:], (-1, 3, 4))
+# poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
+# print(a)
+# print(b.shape)
+# print(ts.shape)
+# print(poses.shape)
+
+# ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
+# print(len(list(ccc.glob("*.txt"))))
+
+
+
+
+from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import convert_nuplan_map_to_gpkg
+
+from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig
+
+MAP_LOCATIONS = {"sg-one-north", "us-ma-boston", "us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood"}
+maps = list(MAP_LOCATIONS)
+
+data_converter_config = DataConverterConfig(output_path="/nas/datasets/nuplan/maps")
+convert_nuplan_map_to_gpkg(maps, data_converter_config)
\ No newline at end of file
diff --git a/jbwang_test2.py b/jbwang_test2.py
index b1229356..b406c52c 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -1,70 +1,104 @@
-# import numpy as np
-# import pickle
+# # import numpy as np
+# # import pickle
+
+# # # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
+# # # a = np.fromfile(path, dtype=np.float32)
+
+# # # print(a.shape)
+# # # print(a[:10])
+
+# # # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
+# # # c = np.loadtxt(path2)
+# # # print(c)
+
+# # import open3d as o3d
+# # import numpy as np
+
+# # def read_ply_file(file_path):
+# # # Read the PLY file
+# # pcd = o3d.io.read_point_cloud(file_path)
+# # print(len(pcd.points), len(pcd.colors))
+# # # Extract vertex information
+# # points = np.asarray(pcd.points) # x, y, z
+# # colors = np.asarray(pcd.colors) # red, green, blue
+# # # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence
+
+# # # Merge all information into a single array
+# # vertices = np.hstack((points, colors))
-# # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
-# # a = np.fromfile(path, dtype=np.float32)
+# # return vertices
-# # print(a.shape)
-# # print(a[:10])
+# # # Example usage
+# # file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # replace with your PLY file path
+# # vertices = read_ply_file(file_path)
-# # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
-# # c = np.loadtxt(path2)
-# # print(c)
+# # # Print the first few vertices
+# # print("Vertex info (first 5 vertices):")
+# # print(vertices[:5])
-# import open3d as o3d
# import numpy as np
+# from scipy.linalg import polar
+# from scipy.spatial.transform import Rotation as R
+
+# def polar_decompose_rotation_scale(A: np.ndarray):
+# """
+# A: 3x3 (rotation + scale + shear)
+# Returns:
+# Rm: pure rotation
+# Sm: symmetric positive definite (scale + shear)
+# scale: approximate per-axis scale (from the square roots of Sm's eigenvalues or its diagonal; treat with care if shear is present)
+# yaw,pitch,roll: ZYX sequence (typically yaw(Z), pitch(Y), roll(X))
+# """
+# Rm, Sm = polar(A) # A = Rm @ Sm
+# # Approximate per-axis scale (assuming no shear):
+# scale = np.diag(Sm)
+# # Euler angles
+# yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+# return {
+# "R": Rm,
+# "S": Sm,
+# "scale_diag": scale,
+# "yaw_pitch_roll": (yaw, pitch, roll),
+# }
+
+# M = np.array([
+# [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02],
+# [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
+# [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00],
+# ])
+# out = polar_decompose_rotation_scale(M)
+# print(out)
+
+
+import glob
+import os
+import cv2
+
+def to_video(folder_path, fps=15, downsample=2):
+ imgs_path = glob.glob(os.path.join(folder_path, '*png*'))
+ # imgs_path = sorted(imgs_path)[:19]
+ imgs_path = sorted(imgs_path)[:700:1]
+ img_array = []
+ for img_path in imgs_path:
+ img = cv2.imread(img_path)
+ height, width, channel = img.shape
+ img = cv2.resize(img, (width // downsample, height // downsample), interpolation=cv2.INTER_AREA)
+ height, width, channel = img.shape
+ size = (width, height)
+ img_array.append(img)
+
+ # media.write_video(os.path.join(folder_path, 'video.mp4'), img_array, fps=10)
+ mp4_path = os.path.join("/data/jbwang/d123/video/", 'video_one_episode.mp4')
+ if os.path.exists(mp4_path):
+ os.remove(mp4_path)
+ out = cv2.VideoWriter(
+ mp4_path,
+ cv2.VideoWriter_fourcc(*'DIVX'), fps, size
+ )
+ for i in range(len(img_array)):
+ out.write(img_array[i])
+ out.release()
+
+to_video("/nas/datasets/KITTI-360/2013_05_28_drive_0000_sync/image_00/data_rect/")
-# def read_ply_file(file_path):
-# # Read the PLY file
-# pcd = o3d.io.read_point_cloud(file_path)
-# print(len(pcd.points), len(pcd.colors))
-# # Extract vertex information
-# points = np.asarray(pcd.points) # x, y, z
-# colors = np.asarray(pcd.colors) # red, green, blue
-# # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence
-
-# # Merge all information into a single array
-# vertices = np.hstack((points, colors))
-
-# return vertices
-
-# # Example usage
-# file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # replace with your PLY file path
-# vertices = read_ply_file(file_path)
-
-# # Print the first few vertices
-# print("Vertex info (first 5 vertices):")
-# print(vertices[:5])
-
-import numpy as np
-from scipy.linalg import polar
-from scipy.spatial.transform import Rotation as R
-
-def polar_decompose_rotation_scale(A: np.ndarray):
- """
- A: 3x3 (rotation + scale + shear)
- Returns:
- Rm: pure rotation
- Sm: symmetric positive definite (scale + shear)
- scale: approximate per-axis scale (from the square roots of Sm's eigenvalues or its diagonal; treat with care if shear is present)
- yaw,pitch,roll: ZYX sequence (typically yaw(Z), pitch(Y), roll(X))
- """
- Rm, Sm = polar(A) # A = Rm @ Sm
- # Approximate per-axis scale (assuming no shear):
- scale = np.diag(Sm)
- # Euler angles
- yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
- return {
- "R": Rm,
- "S": Sm,
- "scale_diag": scale,
- "yaw_pitch_roll": (yaw, pitch, roll),
- }
-
-M = np.array([
- [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02],
- [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
- [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00],
-])
-out = polar_decompose_rotation_scale(M)
-print(out)
\ No newline at end of file
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
index 0996734b..c2cabfbe 100644
--- a/notebooks/dataset/jbwang_test.py
+++ b/notebooks/dataset/jbwang_test.py
@@ -1,7 +1,8 @@
# s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
-s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
+# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
+s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
import pyarrow as pa
import pyarrow.fs as fs
@@ -34,9 +35,9 @@
if col == "lidar":
continue
print(f"Column : {col}, Type: {table.schema.field(col).type}")
- # tokens = table[col] # 或 table.column("token")
+ tokens = table[col] # 或 table.column("token")
# print(tokens)
- # print(len(tokens))
+ print(len(tokens))
# print(tokens.slice(0, 100).to_pylist())
# print(table["traffic_light_ids"])
timer.log("3. Table created")
From 7110af5ef464912c1fe51a673c503f8a293a9d79 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Mon, 18 Aug 2025 12:37:43 +0800
Subject: [PATCH 007/145] finish dynamic and static cars; some bugs remain
 (start and end frame)
---
.../kitti_360/kitti_360_data_converter.py | 75 ++++++++++++++----
.../kitti_360/kitti_360_helper.py | 58 +++++++++++---
.../default_dataset_conversion.yaml | 4 -
d123/script/run_viser.py | 9 +--
jbwang_test2.py | 79 +++++++++++--------
5 files changed, 159 insertions(+), 66 deletions(-)
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 7e13b905..efc0bdf2 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -7,6 +7,7 @@
from typing import Any, Dict, Final, List, Optional, Tuple, Union
import numpy as np
+from collections import defaultdict
import datetime
import hashlib
import xml.etree.ElementTree as ET
@@ -69,7 +70,7 @@
DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train",
}
-#TODO
+#TODO now only parts of labels are used
KIITI360_DETECTION_NAME_DICT = {
"truck": DetectionType.VEHICLE,
"bus": DetectionType.VEHICLE,
@@ -332,7 +333,6 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
lidar_type=LiDARType.LIDAR_TOP,
lidar_index=Kitti360LidarIndex,
- #TODO extrinsic needed to be same with nuplan
extrinsic=extrinsic,
)
return metadata
@@ -345,8 +345,11 @@ def _write_recording_table(
) -> None:
ts_list = _read_timestamps(log_name)
- ego_state_all = _extract_ego_state_all(log_name)
+ #TODO
+ print("extracting detections...")
detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list))
+ print("extracting states...")
+ ego_state_all = _extract_ego_state_all(log_name)
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
@@ -437,6 +440,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
oxts_data = np.loadtxt(oxts_path_file)
+ #TODO check roll, pitch, yaw
roll, pitch, yaw = oxts_data[3:6]
vehicle_parameters = get_kitti360_station_wagon_parameters()
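
# Context for the indices in the hunk above: in KITTI's OXTS packets the first
# six fields are lat, lon, alt, roll, pitch, yaw, so oxts_data[3:6] is the
# attitude in radians. A hedged parsing sketch (the path is illustrative):
import numpy as np

def read_oxts_attitude(path: str):
    fields = np.loadtxt(path)
    lat, lon, alt = fields[0:3]     # degrees, degrees, metres
    roll, pitch, yaw = fields[3:6]  # radians, per the KITTI oxts dataformat
    return (lat, lon, alt), (roll, pitch, yaw)
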
@@ -479,7 +483,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
)
return ego_state_all
-#TODO now only divided by data_3d_semantics
+#TODO
# We may distinguish between image and lidar detections
# besides, now it is based only on start and end frame
def _extract_detections(
@@ -499,6 +503,18 @@ def _extract_detections(
tree = ET.parse(bbox_3d_path)
root = tree.getroot()
+ dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
+
+ lidar_data_all = []
+ for index in range(ts_len):
+ lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
+ if not lidar_full_path.exists():
+ logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
+ continue
+ lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
+ lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates
+ lidar_data_all.append(lidar_data)
+
for child in root:
label = child.find('label').text
if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
@@ -506,27 +522,57 @@ def _extract_detections(
obj = KITTI360Bbox3D()
obj.parseBbox(child)
- # static
+ #static object
if obj.timestamp == -1:
start_frame = obj.start_frame
end_frame = obj.end_frame
for frame in range(start_frame, end_frame + 1):
- #TODO check if valid in each frame
- if frame < 0 or frame >= ts_len:
- continue
- #TODO check yaw
+ lidar_data = lidar_data_all[frame]
+ #TODO check yaw and box visible
+ # if obj.box_visible_in_point_cloud(lidar_data):
detections_states[frame].append(obj.get_state_array())
detections_velocity[frame].append([0.0, 0.0, 0.0])
detections_tokens[frame].append(str(obj.globalID))
- detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[label]))
- # dynamic
+ detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
else:
+ ann_id = obj.annotationId
+ dynamic_groups[ann_id].append(obj)
+
+ # dynamic object
+ for ann_id, obj_list in dynamic_groups.items():
+ obj_list.sort(key=lambda obj: obj.timestamp)
+ num_frames = len(obj_list)
+
+ positions = [obj.get_state_array()[:3] for obj in obj_list]
+ timestamps = [int(obj.timestamp) for obj in obj_list]
+
+ velocities = []
+
+ for i in range(1, num_frames - 1):
+ dt_frames = timestamps[i+1] - timestamps[i-1]
+ if dt_frames > 0:
+ dt = dt_frames * KITTI360_DT
+ vel = (positions[i+1] - positions[i-1]) / dt
+ # Transform velocity to the ego frame
+ vel = obj_list[i].Rm.T @ vel
+ else:
+ vel = np.zeros(3)
+ velocities.append(vel)
+
+            if num_frames > 2:
+                # first and last frame reuse the nearest interior estimate
+                velocities.insert(0, velocities[0])
+                velocities.append(velocities[-1])
+            elif num_frames == 2:
+                # too few samples for a central difference
+                velocities = [np.zeros(3), np.zeros(3)]
+            elif num_frames == 1:
+                velocities.append(np.zeros(3))
+
+ for obj, vel in zip(obj_list, velocities):
frame = obj.timestamp
detections_states[frame].append(obj.get_state_array())
- #TODO velocity not provided
- detections_velocity[frame].append([0.0, 0.0, 0.0])
+ detections_velocity[frame].append(vel)
detections_tokens[frame].append(str(obj.globalID))
- detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[label]))
+ detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
+
return detections_states, detections_velocity, detections_tokens, detections_types
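(Aside, not part of the patch.) A minimal standalone sketch of the central-difference velocity estimate used in the dynamic-object branch above, assuming `positions` are world-frame box centers and `timestamps` are frame indices sampled every `KITTI360_DT` seconds:

import numpy as np

KITTI360_DT = 0.1  # seconds between consecutive frames

def central_difference_velocities(positions, timestamps, dt=KITTI360_DT):
    # positions: list of (3,) arrays; timestamps: monotonically increasing frame indices.
    n = len(positions)
    if n == 1:
        return [np.zeros(3)]
    if n == 2:
        dt_frames = timestamps[1] - timestamps[0]
        vel = (positions[1] - positions[0]) / (dt_frames * dt) if dt_frames > 0 else np.zeros(3)
        return [vel, vel]
    velocities = []
    for i in range(1, n - 1):
        dt_frames = timestamps[i + 1] - timestamps[i - 1]
        if dt_frames > 0:
            velocities.append((positions[i + 1] - positions[i - 1]) / (dt_frames * dt))
        else:
            velocities.append(np.zeros(3))
    # Endpoints reuse the nearest interior estimate.
    velocities.insert(0, velocities[0])
    velocities.append(velocities[-1])
    return velocities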
@@ -543,7 +589,6 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter
raise FileNotFoundError(f"LiDAR file not found: {lidar_full_path}")
return {LiDARType.LIDAR_TOP: lidar}
-#TODO check camera extrinsic now is from camera to pose
def _extract_cameras(
log_name: str, idx: int, data_converter_config: DataConverterConfig
) -> Dict[CameraType, Optional[str]]:
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index c86d9604..dc1d10cf 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -47,6 +47,9 @@ def __init__(self):
# name
self.name = ''
+
+ #label
+ self.label = ''
def parseOpencvMatrix(self, node):
rows = int(node.find('rows').text)
@@ -75,28 +78,63 @@ def parseBbox(self, child):
self.annotationId = int(child.find('index').text) + 1
+ self.label = child.find('label').text
+
self.globalID = local2global(self.semanticId, self.instanceId)
+ self.parseVertices(child)
+ self.parse_scale_rotation()
+
+ def parseVertices(self, child):
transform = self.parseOpencvMatrix(child.find('transform'))
- self.R = transform[:3,:3]
- self.T = transform[:3,3]
+ R = transform[:3,:3]
+ T = transform[:3,3]
+ vertices = self.parseOpencvMatrix(child.find('vertices'))
+
+ vertices = np.matmul(R, vertices.transpose()).transpose() + T
+ self.vertices = vertices
+
+ self.R = R
+ self.T = T
- def polar_decompose_rotation_scale(self):
+ def parse_scale_rotation(self):
Rm, Sm = polar(self.R)
scale = np.diag(Sm)
yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
- return scale, (yaw, pitch, roll)
-
+ self.Rm = np.array(Rm)
+ self.scale = scale
+ self.yaw = yaw
+ self.pitch = pitch
+ self.roll = roll
+
+ # self.pose = np.eye(4, dtype=np.float64)
+ # self.pose[:3, :3] = self.Rm
+ # self.pose[:3, 3] = self.T
+ # self.w2e = np.linalg.inv(self.pose)
+
def get_state_array(self):
- scale, (yaw, pitch, roll) = self.polar_decompose_rotation_scale()
center = StateSE3(
x=self.T[0],
y=self.T[1],
z=self.T[2],
- roll=roll,
- pitch=pitch,
- yaw=yaw,
+ roll=self.roll,
+ pitch=self.pitch,
+ yaw=self.yaw,
)
+ scale = self.scale
bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2])
- return bounding_box_se3.array
\ No newline at end of file
+ return bounding_box_se3.array
+
+    def box_visible_in_point_cloud(self, points):
+        # points: (N, 3); box: (8, 3). Slab test: a point is inside iff its projections onto the edges OA, OB, OC lie between the corner projections.
+        box = self.vertices
+        O, A, B, C = box[0], box[1], box[2], box[5]
+        OA = A - O
+        OB = B - O
+        OC = C - O
+        POA, POB, POC = points @ OA, points @ OB, points @ OC
+        mask = (np.dot(O, OA) < POA) & (POA < np.dot(A, OA)) & \
+               (np.dot(O, OB) < POB) & (POB < np.dot(B, OB)) & \
+               (np.dot(O, OC) < POC) & (POC < np.dot(C, OC))
+        # Count the box as visible if more than 100 points fall inside it.
+        return np.sum(mask) > 100
\ No newline at end of file
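(Aside, not part of the patch.) The visibility check above is a projection ("slab") test. A self-contained sketch with a unit cube whose corners follow the same index convention — corners 0, 1, 2, 5 spanning the three edges; the corner ordering here is hypothetical, chosen for illustration:

import numpy as np

def points_in_obb(points, box):
    # points: (N, 3); box: (8, 3) corners with box[1]-box[0], box[2]-box[0],
    # box[5]-box[0] forming three mutually orthogonal edges.
    O, A, B, C = box[0], box[1], box[2], box[5]
    mask = np.ones(len(points), dtype=bool)
    for axis, far in ((A - O, A), (B - O, B), (C - O, C)):
        proj = points @ axis
        mask &= (np.dot(O, axis) < proj) & (proj < np.dot(far, axis))
    return mask

box = np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
                [1, 0, 1], [0, 0, 1], [0, 1, 1], [1, 1, 1]], dtype=float)
pts = np.array([[0.5, 0.5, 0.5], [2.0, 0.0, 0.0]])
print(points_in_obb(pts, box))  # [ True False]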
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 97ca3a7a..e1c76c60 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,12 +15,8 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
-<<<<<<< HEAD
- kitti360_dataset
# - nuplan_private_dataset
-=======
- - nuplan_private_dataset
->>>>>>> dev_v0.0.6
# - carla_dataset
# - wopd_dataset
diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py
index e977c669..faaf08ca 100644
--- a/d123/script/run_viser.py
+++ b/d123/script/run_viser.py
@@ -19,14 +19,11 @@ def main(cfg: DictConfig) -> None:
worker = build_worker(cfg)
scene_filter = build_scene_filter(cfg.scene_filter)
- logger.info(f"Scene filter: {scene_filter}")
- logger.info(f"Using {cfg.scene_builder}")
- scene_filter.duration_s = 50
scene_builder = build_scene_builder(cfg.scene_builder)
scenes = scene_builder.get_scenes(scene_filter, worker=worker)
- logger.info(f"Found {len(scenes)} scenes.")
- ViserVisualizationServer(scenes=scenes,scene_index=0)
+
+ ViserVisualizationServer(scenes=scenes)
if __name__ == "__main__":
- main()
+ main()
\ No newline at end of file
diff --git a/jbwang_test2.py b/jbwang_test2.py
index b406c52c..aa685428 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -69,36 +69,53 @@
# out = polar_decompose_rotation_scale(M)
# print(out)
+# import numpy as np
+# path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
+# a = np.fromfile(path, dtype=np.float32)
+# a = a.reshape((-1,4))
+# print(a[10000:10010,:3])
-import glob
-import os
-import cv2
-
-def to_video(folder_path, fps=15, downsample=2):
- imgs_path = glob.glob(os.path.join(folder_path, '*png*'))
- # imgs_path = sorted(imgs_path)[:19]
- imgs_path = sorted(imgs_path)[:700:1]
- img_array = []
- for img_path in imgs_path:
- img = cv2.imread(img_path)
- height, width, channel = img.shape
- img = cv2.resize(img, (width // downsample, height //
- downsample), interpolation=cv2.INTER_AREA)
- height, width, channel = img.shape
- size = (width, height)
- img_array.append(img)
-
- # media.write_video(os.path.join(folder_path, 'video.mp4'), img_array, fps=10)
- mp4_path = os.path.join("/data/jbwang/d123/video/", 'video_one_episode.mp4')
- if os.path.exists(mp4_path):
- os.remove(mp4_path)
- out = cv2.VideoWriter(
- mp4_path,
- cv2.VideoWriter_fourcc(*'DIVX'), fps, size
- )
- for i in range(len(img_array)):
- out.write(img_array[i])
- out.release()
-
-to_video("/nas/datasets/KITTI-360/2013_05_28_drive_0000_sync/image_00/data_rect/")
+import gc
+import json
+import os
+from dataclasses import asdict
+from functools import partial
+from pathlib import Path
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+import numpy as np
+from collections import defaultdict
+import datetime
+import hashlib
+import xml.etree.ElementTree as ET
+import pyarrow as pa
+from PIL import Image
+import logging
+
+from d123.common.datatypes.detection.detection_types import DetectionType
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+
+
+bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
+
+tree = ET.parse(bbox_3d_path)
+root = tree.getroot()
+
+KIITI360_DETECTION_NAME_DICT = {
+ "truck": DetectionType.VEHICLE,
+ "bus": DetectionType.VEHICLE,
+ "car": DetectionType.VEHICLE,
+ "motorcycle": DetectionType.BICYCLE,
+ "bicycle": DetectionType.BICYCLE,
+ "pedestrian": DetectionType.PEDESTRIAN,
+}
+
+for child in root:
+ label = child.find('label').text
+ if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+ continue
+ obj = KITTI360Bbox3D()
+ obj.parseBbox(child)
+ # print(obj.Rm)
+ # print(Sigma)
\ No newline at end of file
From 778604d4ace9cd9bbc3e27445d3a5f0449786426 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 19 Aug 2025 15:23:08 +0800
Subject: [PATCH 008/145] nearly done with kitti_360, but some questions remain
---
.../config/dataset_conversion/default_dataset_conversion.yaml | 2 +-
d123/script/run_viser.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index d8fa5988..52915f13 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,11 +15,11 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
- - kitti360_dataset
# - nuplan_private_dataset
# - carla_dataset
# - wopd_dataset
# - av2_sensor_dataset
+ - kitti360_dataset
force_log_conversion: True
force_map_conversion: False
diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py
index faaf08ca..8973acea 100644
--- a/d123/script/run_viser.py
+++ b/d123/script/run_viser.py
@@ -21,7 +21,7 @@ def main(cfg: DictConfig) -> None:
scene_filter = build_scene_filter(cfg.scene_filter)
scene_builder = build_scene_builder(cfg.scene_builder)
scenes = scene_builder.get_scenes(scene_filter, worker=worker)
-
+
ViserVisualizationServer(scenes=scenes)
From 94bc3f420ab5ba7486556e8aa72e30786e25f0db Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 19 Aug 2025 15:24:35 +0800
Subject: [PATCH 009/145] nearly done with kitti_360, but some questions remain
---
.gitignore | 3 +-
.../dataset_specific/kitti_360/jbwang_test.py | 5 +-
.../kitti_360/kitti_360_data_converter.py | 121 ++++++------
.../kitti_360/kitti_360_helper.py | 14 +-
docs/datasets/kitti-360.rst | 10 +-
jbwang_test2.py | 27 ++-
notebooks/dataset/jbwang_test.py | 14 +-
notebooks/gym/jbwang_test.py | 180 ++++++++++++++++++
8 files changed, 302 insertions(+), 72 deletions(-)
create mode 100644 notebooks/gym/jbwang_test.py
diff --git a/.gitignore b/.gitignore
index 3a820809..426cc468 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,8 +23,7 @@
*.csv
*.log
*.mp4
-exp/*
-
+exp/
# Sphinx documentation
docs/_build/
diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
index 6f0bdbd9..e480783e 100644
--- a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
+++ b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
@@ -21,7 +21,7 @@
from sqlalchemy import func
-from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections
+from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections,_read_timestamps
# a = _extract_ego_state_all("2013_05_28_drive_0000_sync")
# print(a[0])
@@ -151,4 +151,5 @@ def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60):
# # camera_data = _extract_camera(log_db, lidar_pc, log_path)
# camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60)
# print(_extract_cameras("2013_05_28_drive_0000_sync",0))
- _extract_detections("2013_05_28_drive_0000_sync", 0)
\ No newline at end of file
+ # _extract_detections("2013_05_28_drive_0000_sync", 0)
+ print(_read_timestamps("2013_05_28_drive_0000_sync"))
\ No newline at end of file
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index efc0bdf2..81057042 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -37,7 +37,7 @@
KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
-#TODO carera mismatch
+#TODO cameraType
KITTI360_CAMERA_TYPES = {
CameraType.CAM_L0: "image_00",
CameraType.CAM_R0: "image_01",
@@ -55,6 +55,7 @@
DIR_CALIB = "calibration"
#TODO PATH_2D_RAW_ROOT
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
@@ -206,13 +207,12 @@ def convert_kitti360_log_to_arrow(
if not log_file_path.parent.exists():
log_file_path.parent.mkdir(parents=True, exist_ok=True)
- #TODO location
metadata = LogMetadata(
dataset="kitti360",
log_name=log_name,
- location="None",
+ location=None,
timestep_seconds=KITTI360_DT,
- map_has_z=False,
+ map_has_z=True,
)
vehicle_parameters = get_kitti360_station_wagon_parameters()
@@ -345,11 +345,9 @@ def _write_recording_table(
) -> None:
ts_list = _read_timestamps(log_name)
- #TODO
- print("extracting detections...")
- detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list))
- print("extracting states...")
ego_state_all = _extract_ego_state_all(log_name)
+ ego_states_xyz = np.array([ego_state[:3] for ego_state in ego_state_all],dtype=np.float64)
+ detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz)
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
@@ -364,7 +362,6 @@ def _write_recording_table(
"detections_type": [detections_types[idx]],
"ego_states": [ego_state_all[idx]],
"traffic_light_ids": [[]],
- #may TODO traffic light types
"traffic_light_types": [[]],
"scenario_tag": [['unknown']],
"route_lane_group_ids": [[]],
@@ -391,36 +388,44 @@ def _write_recording_table(
batch = pa.record_batch(row_data, schema=recording_schema)
writer.write_batch(batch)
+ del batch
+
if SORT_BY_TIMESTAMP:
recording_table = open_arrow_table(log_file_path)
recording_table = recording_table.sort_by([("timestamp", "ascending")])
write_arrow_table(recording_table, log_file_path)
-#TODO default timestamps and Synchronization all other sequences
+#TODO synchronize all other sequences
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
# unix
- ts_file = PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt"
- if ts_file.exists():
- tps: List[TimePoint] = []
- with open(ts_file, "r") as f:
- for line in f:
- s = line.strip()
- if not s:
- continue
- dt_str, ns_str = s.split('.')
- dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
- dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
- unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
-
- total_seconds = (dt_obj - unix_epoch).total_seconds()
-
- ns_value = int(ns_str)
- us_from_ns = ns_value // 1000
-
- total_us = int(total_seconds * 1_000_000) + us_from_ns
-
- tps.append(TimePoint.from_us(total_us))
- return tps
+    # Default to velodyne timestamps; if unavailable, fall back to camera timestamps.
+ ts_files = [
+ PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
+ PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
+ PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt",
+ ]
+ for ts_file in ts_files:
+ if ts_file.exists():
+ tps: List[TimePoint] = []
+ with open(ts_file, "r") as f:
+ for line in f:
+ s = line.strip()
+ if not s:
+ continue
+ dt_str, ns_str = s.split('.')
+ dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
+ dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
+ unix_epoch = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)
+
+ total_seconds = (dt_obj - unix_epoch).total_seconds()
+
+ ns_value = int(ns_str)
+ us_from_ns = ns_value // 1000
+
+ total_us = int(total_seconds * 1_000_000) + us_from_ns
+
+ tps.append(TimePoint.from_us(total_us))
+ return tps
return None
def _extract_ego_state_all(log_name: str) -> List[List[float]]:
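(Aside, not part of the patch.) The manual split above exists because KITTI-360 timestamp lines carry nine fractional digits (nanoseconds), which `datetime`'s `%f` directive (at most six digits) cannot parse. A minimal sketch of the same conversion, with a made-up timestamp line:

import datetime

def timestamp_line_to_unix_us(line: str) -> int:
    dt_str, ns_str = line.strip().split('.')
    dt_obj = datetime.datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S")
    dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
    return int(dt_obj.timestamp() * 1_000_000) + int(ns_str) // 1000

print(timestamp_line_to_unix_us("2013-05-28 08:46:02.935949056"))  # 1369730762935949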
@@ -434,8 +439,12 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
#TODO
+ #oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
+ pose_idx = 0
+ poses_time_len = len(poses_time)
+
for idx in range(len(list(oxts_path.glob("*.txt")))):
oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
oxts_data = np.loadtxt(oxts_path_file)
@@ -444,7 +453,10 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
roll, pitch, yaw = oxts_data[3:6]
vehicle_parameters = get_kitti360_station_wagon_parameters()
- pos = np.searchsorted(poses_time, idx, side='right') - 1
+ while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] <= idx:
+ pose_idx += 1
+ pos = pose_idx
+ # pos = np.searchsorted(poses_time, idx, side='right') - 1
rear_axle_pose = StateSE3(
x=poses[pos, 4],
@@ -454,7 +466,7 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
pitch=pitch,
yaw=yaw,
)
- # NOTE: The height to rear axle is not provided the dataset and is merely approximated.
+
center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
dynamic_state = DynamicStateSE3(
velocity=Vector3D(
@@ -483,12 +495,10 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
)
return ego_state_all
-#TODO
-# We may distinguish between image and lidar detections
-# besides, now it is based only on start and end frame
def _extract_detections(
log_name: str,
- ts_len: int
+ ts_len: int,
+ ego_states_xyz: np.ndarray
) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:
detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
@@ -505,15 +515,16 @@ def _extract_detections(
dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
-    lidar_data_all = []
- for index in range(ts_len):
- lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
- if not lidar_full_path.exists():
- logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
- continue
- lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
- lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates
-        lidar_data_all.append(lidar_data)
+
+    # lidar_data_all = []
+ # for index in range(ts_len):
+ # lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
+ # if not lidar_full_path.exists():
+ # logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
+ # continue
+ # lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
+ # lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates
+    # lidar_data_all.append(lidar_data)
for child in root:
label = child.find('label').text
@@ -524,11 +535,13 @@ def _extract_detections(
            # static object
if obj.timestamp == -1:
- start_frame = obj.start_frame
- end_frame = obj.end_frame
- for frame in range(start_frame, end_frame + 1):
-                lidar_data = lidar_data_all[frame]
-                #TODO check yaw and box visibility
+ # first filter by radius
+ obj.filter_by_radius(ego_states_xyz,radius=50.0)
+ # then filter by pointcloud
+ for frame in obj.valid_radius_frames:
+                # TODO: re-enable the point-cloud visibility check later; it is too slow
+                # for now (the server's CPUs are busy). Possibly gate it behind a config flag.
+                # lidar_data = lidar_data_all[frame]
# if obj.box_visible_in_point_cloud(lidar_data):
detections_states[frame].append(obj.get_state_array())
detections_velocity[frame].append([0.0, 0.0, 0.0])
@@ -553,8 +566,7 @@ def _extract_detections(
if dt_frames > 0:
dt = dt_frames * KITTI360_DT
vel = (positions[i+1] - positions[i-1]) / dt
- # Transform velocity to the ego frame
- vel = obj_list[i].Rm.T @ vel
+ vel = KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3] @ obj_list[i].Rm.T @ vel
else:
vel = np.zeros(3)
velocities.append(vel)
@@ -573,7 +585,6 @@ def _extract_detections(
detections_tokens[frame].append(str(obj.globalID))
detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
-
return detections_states, detections_velocity, detections_tokens, detections_types
#TODO lidar extraction now only velo
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index dc1d10cf..d4622867 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -41,6 +41,7 @@ def __init__(self):
# the window that contains the bbox
self.start_frame = -1
self.end_frame = -1
+ self.valid_radius_frames = []
# timestamp of the bbox (-1 if statis)
self.timestamp = -1
@@ -70,8 +71,8 @@ def parseBbox(self, child):
self.semanticId = kittiId2label[semanticIdKITTI].id
self.instanceId = int(child.find('instanceId').text)
self.name = kittiId2label[semanticIdKITTI].name
-
- self.start_frame = int(child.find('start_frame').text)
+
+ self.start_frame = int(child.find('start_frame').text)
self.end_frame = int(child.find('end_frame').text)
self.timestamp = int(child.find('timestamp').text)
@@ -126,6 +127,15 @@ def get_state_array(self):
return bounding_box_se3.array
+    def filter_by_radius(self, ego_state_xyz, radius=50.0):
+        # First detection-filtering stage: keep only the frames in which the ego
+        # vehicle is within `radius` meters of the box center.
+        for index in range(len(ego_state_xyz)):
+            ego_state = ego_state_xyz[index]
+            distance = np.linalg.norm(ego_state[:3] - self.T)
+            if distance <= radius:
+                self.valid_radius_frames.append(index)
+
def box_visible_in_point_cloud(self, points):
        # points: (N, 3); box: (8, 3). Slab test: a point is inside iff its projections onto the edges OA, OB, OC lie between the corner projections.
box = self.vertices
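(Aside, not part of the patch.) The per-frame loop in `filter_by_radius` can also be written as a single vectorized norm; a sketch, assuming `ego_state_xyz` is a (T, 3) array and `center` is the box translation `self.T`:

import numpy as np

def frames_within_radius(ego_state_xyz, center, radius=50.0):
    # One norm over all T ego positions instead of a Python loop.
    distances = np.linalg.norm(ego_state_xyz - center, axis=1)
    return np.nonzero(distances <= radius)[0].tolist()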
diff --git a/docs/datasets/kitti-360.rst b/docs/datasets/kitti-360.rst
index 76100d27..5846e53b 100644
--- a/docs/datasets/kitti-360.rst
+++ b/docs/datasets/kitti-360.rst
@@ -7,12 +7,12 @@ KiTTI-360
:alt: Dataset sample image
:width: 290px
- | **Paper:** `Name of Paper `_
- | **Download:** `Documentation `_
- | **Code:** [Code]
- | **Documentation:** [License type]
+ | **Paper:** `KITTI-360: A Novel Dataset and Benchmarks for Urban Scene Understanding in 2D and 3D `_
+ | **Download:** `www.cvlibs.net/datasets/kitti-360 `_
+ | **Code:** `www.github.com/autonomousvision/kitti360Scripts `_
+ | **Documentation:** `kitti-360 Document`_
| **License:** [License type]
- | **Duration:** [Duration here]
+   | **Duration:** 320k images
| **Supported Versions:** [Yes/No/Conditions]
| **Redistribution:** [Yes/No/Conditions]
diff --git a/jbwang_test2.py b/jbwang_test2.py
index aa685428..93d86a11 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -97,6 +97,7 @@
from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+#TODO train and train_full
bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
tree = ET.parse(bbox_3d_path)
@@ -110,12 +111,34 @@
"bicycle": DetectionType.BICYCLE,
"pedestrian": DetectionType.PEDESTRIAN,
}
-
+# x,y,z = 881.2268115,3247.493293,115.239219
+# x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
+# x,y,z = 873.533508, 3227.16235, 115.185341 # the person we are looking for
+x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
+CENTER_REF = np.array([x, y, z], dtype=np.float64)
+objs_name = []
for child in root:
label = child.find('label').text
if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
continue
obj = KITTI360Bbox3D()
obj.parseBbox(child)
+ # obj.parseVertices(child)
+ name = child.find('label').text
+ # if obj.start_frame < 10030 and obj.end_frame > 10030:
+ center = np.array(obj.T, dtype=np.float64)
+ dist = np.linalg.norm(center - CENTER_REF)
+ if dist < 7:
+ print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
+ objs_name.append(obj.name)
+print(len(objs_name))
+print(set(objs_name))
# print(obj.Rm)
- # print(Sigma)
\ No newline at end of file
+ # print(Sigma)
+names = []
+for child in root:
+ label = child.find('label').text
+ if child.find('transform') is None:
+ continue
+ names.append(label)
+print(set(names))
\ No newline at end of file
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
index c2cabfbe..c37d8d40 100644
--- a/notebooks/dataset/jbwang_test.py
+++ b/notebooks/dataset/jbwang_test.py
@@ -2,7 +2,9 @@
# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
-s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
+# s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
+s3_uri = "/data/jbwang/d123/data2/kitti360_detection_all_test/2013_05_28_drive_0000_sync.arrow"
+
import pyarrow as pa
import pyarrow.fs as fs
@@ -35,10 +37,14 @@
if col == "lidar":
continue
print(f"Column : {col}, Type: {table.schema.field(col).type}")
- tokens = table[col] # 或 table.column("token")
+    tokens = table["detections_velocity"]  # or table.column("token")
+ # tokens = table["detections_type"]
# print(tokens)
- print(len(tokens))
- # print(tokens.slice(0, 100).to_pylist())
+ # print(len(tokens))
+ result = tokens.slice(1470, 40).to_pylist()
+ # for item in result:
+ # print(len(item))
+print(result)
# print(table["traffic_light_ids"])
timer.log("3. Table created")
# Save locally
diff --git a/notebooks/gym/jbwang_test.py b/notebooks/gym/jbwang_test.py
new file mode 100644
index 00000000..663e2899
--- /dev/null
+++ b/notebooks/gym/jbwang_test.py
@@ -0,0 +1,180 @@
+from d123.dataset.scene.scene_builder import ArrowSceneBuilder
+from d123.dataset.scene.scene_filter import SceneFilter
+
+from d123.common.multithreading.worker_sequential import Sequential
+# from d123.common.multithreading.worker_ray import RayDistributed
+
+import os, psutil
+
+from pathlib import Path
+from typing import Optional, Tuple
+
+import matplotlib.animation as animation
+import matplotlib.pyplot as plt
+from tqdm import tqdm
+
+from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
+from d123.common.geometry.base import Point2D, StateSE2
+from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG
+from d123.common.visualization.matplotlib.observation import (
+ add_bounding_box_to_ax,
+ add_box_detections_to_ax,
+ add_default_map_on_ax,
+ add_traffic_lights_to_ax,
+ add_ego_vehicle_to_ax,
+)
+from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper
+from d123.dataset.maps.abstract_map import AbstractMap
+from d123.common.datatypes.detection.detection import BoxDetectionWrapper
+from d123.dataset.scene.abstract_scene import AbstractScene
+import io
+from PIL import Image
+
+
+
+def _plot_scene_on_ax(
+ ax: plt.Axes,
+ map_api: AbstractMap,
+ ego_state: EgoStateSE2,
+ initial_ego_state: Optional[EgoStateSE2],
+ box_detections: BoxDetectionWrapper,
+ traffic_light_detections: TrafficLightDetectionWrapper,
+ radius: float = 120,
+) -> plt.Axes:
+
+ if initial_ego_state is not None:
+ point_2d = initial_ego_state.center.point_2d
+ else:
+ point_2d = ego_state.center.point_2d
+ add_default_map_on_ax(ax, map_api, point_2d, radius=radius)
+ add_traffic_lights_to_ax(ax, traffic_light_detections, map_api)
+
+ add_box_detections_to_ax(ax, box_detections)
+ add_ego_vehicle_to_ax(ax, ego_state)
+
+ ax.set_xlim(point_2d.x - radius, point_2d.x + radius)
+ ax.set_ylim(point_2d.y - radius, point_2d.y + radius)
+
+ ax.set_aspect("equal", adjustable="box")
+ return ax
+
+
+def plot_scene_to_image(
+ map_api: AbstractMap,
+ ego_state: EgoStateSE2,
+ initial_ego_state: Optional[EgoStateSE2],
+ box_detections: BoxDetectionWrapper,
+ traffic_light_detections: TrafficLightDetectionWrapper,
+ radius: float = 120,
+ figsize: Tuple[int, int] = (8, 8),
+) -> Image.Image:
+
+ fig, ax = plt.subplots(figsize=figsize)
+ _plot_scene_on_ax(ax, map_api, ego_state, initial_ego_state, box_detections, traffic_light_detections, radius)
+ ax.set_aspect("equal", adjustable="box")
+ plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
+ # plt.tight_layout()
+
+ buf = io.BytesIO()
+ fig.savefig(buf, format="png", bbox_inches="tight")
+ plt.close(fig)
+ buf.seek(0)
+ img = Image.open(buf)
+ return img
+
+
+def print_memory_usage():
+ process = psutil.Process(os.getpid())
+ memory_info = process.memory_info()
+ print(f"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB")
+
+
+split = "kitti360_detection_all_and_vel"
+scene_tokens = None
+log_names = None
+
+scene_filter = SceneFilter(
+ split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=15.1, history_s=1.0
+)
+scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data2/")
+worker = Sequential()
+# worker = RayDistributed()
+scenes = scene_builder.get_scenes(scene_filter, worker)
+
+print(len(scenes))
+
+for scene in scenes[:10]:
+ print(scene.log_name, scene.token)
+
+from d123.dataset.arrow.conversion import DetectionType
+from d123.simulation.gym.gym_env import GymEnvironment
+from d123.simulation.observation.agents_observation import _filter_agents_by_type
+
+import time
+
+images = []
+agent_rollouts = []
+plot: bool = True
+action = [1.0, -0.0] # Placeholder action, replace with actual action logic
+env = GymEnvironment(scenes)
+
+start = time.time()
+
+map_api, ego_state, detection_observation, current_scene = env.reset(scenes[1460])
+initial_ego_state = ego_state
+cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])
+agent_rollouts.append(BoxDetectionWrapper(cars))
+if plot:
+ images.append(
+ plot_scene_to_image(
+ map_api,
+ ego_state,
+ initial_ego_state,
+ detection_observation.box_detections,
+ detection_observation.traffic_light_detections,
+ )
+ )
+
+
+for i in range(160):
+ ego_state, detection_observation, end = env.step(action)
+ cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])
+ agent_rollouts.append(BoxDetectionWrapper(cars))
+ if plot:
+ images.append(
+ plot_scene_to_image(
+ map_api,
+ ego_state,
+ initial_ego_state,
+ detection_observation.box_detections,
+ detection_observation.traffic_light_detections,
+ )
+ )
+ if end:
+ print("End of scene reached.")
+ break
+
+time_s = time.time() - start
+print(time_s)
+print(151 / time_s)
+
+import numpy as np
+
+
+def create_gif(images, output_path, duration=100):
+ """
+ Create a GIF from a list of PIL images.
+
+ Args:
+ images (list): List of PIL.Image objects.
+ output_path (str): Path to save the GIF.
+ duration (int): Duration between frames in milliseconds.
+ """
+ if images:
+ print(len(images))
+ images_p = [img.convert("P", palette=Image.ADAPTIVE) for img in images]
+ images_p[0].save(output_path, save_all=True, append_images=images_p[1:], duration=duration, loop=0)
+
+
+create_gif(images, f"/data/jbwang/d123/data2/{split}_{current_scene.token}.gif", duration=20)
\ No newline at end of file
From d1945b0c470c653f893b65026be80ef66336767d Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Thu, 21 Aug 2025 19:01:27 +0800
Subject: [PATCH 010/145] finish lidar visualization and fix some bugs
---
.gitignore | 2 +
d123/common/datatypes/sensor/camera.py | 48 +++++++
.../vehicle_state/vehicle_parameters.py | 9 +-
d123/common/visualization/viser/server.py | 10 +-
d123/dataset/arrow/conversion.py | 6 +-
.../kitti_360/kitti_360_data_converter.py | 124 ++++++++++--------
.../kitti_360/kitti_360_helper.py | 24 ++++
.../dataset_specific/kitti_360/labels.py | 40 ++++++
.../dataset_specific/kitti_360/load_sensor.py | 27 ++++
jbwang_test2.py | 10 +-
10 files changed, 232 insertions(+), 68 deletions(-)
create mode 100644 d123/dataset/dataset_specific/kitti_360/load_sensor.py
diff --git a/.gitignore b/.gitignore
index 426cc468..971a12d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,5 @@ docs/_build/
docs/build/
_build/
.doctrees/
+
+jbwang_*
diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py
index 56fe6f07..c2a33d9d 100644
--- a/d123/common/datatypes/sensor/camera.py
+++ b/d123/common/datatypes/sensor/camera.py
@@ -104,6 +104,54 @@ def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict
for camera_type, metadata in camera_metadata_dict.items()
}
+#TODO Code Refactoring
+@dataclass
+class FisheyeMEICameraMetadata:
+ camera_type: CameraType
+ width: int
+ height: int
+    mirror_parameters: float  # xi in the MEI unified camera model
+    distortion: npt.NDArray[np.float64]  # k1, k2, p1, p2
+    projection_parameters: npt.NDArray[np.float64]  # gamma1, gamma2, u0, v0
+
+ def to_dict(self) -> Dict[str, Any]:
+ # TODO: remove None types. Only a placeholder for now.
+ return {
+ "camera_type": int(self.camera_type),
+ "width": self.width,
+ "height": self.height,
+ "mirror_parameters": self.mirror_parameters,
+ "distortion": self.distortion.tolist() if self.distortion is not None else None,
+ "projection_parameters": self.projection_parameters.tolist() if self.projection_parameters is not None else None,
+ }
+
+ def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ ''' camera coordinate to image plane '''
+ norm = np.linalg.norm(points_3d, axis=1)
+
+ x = points_3d[:,0] / norm
+ y = points_3d[:,1] / norm
+ z = points_3d[:,2] / norm
+
+        x /= z + self.mirror_parameters
+        y /= z + self.mirror_parameters
+
+ k1 = self.distortion[0]
+ k2 = self.distortion[1]
+ gamma1 = self.projection_parameters[0]
+ gamma2 = self.projection_parameters[1]
+ u0 = self.projection_parameters[2]
+ v0 = self.projection_parameters[3]
+
+ ro2 = x*x + y*y
+ x *= 1 + k1*ro2 + k2*ro2*ro2
+ y *= 1 + k1*ro2 + k2*ro2*ro2
+
+ x = gamma1*x + u0
+ y = gamma2*y + v0
+
+ return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2])
+
@dataclass
class Camera:
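(Aside, not part of the patch.) A usage sketch for the MEI/unified projection above, with made-up calibration values — the real ones come from the `image_02.yaml` / `image_03.yaml` files, and `CameraType.CAM_L1` is only a hypothetical fisheye slot here:

from d123.common.datatypes.sensor.camera import CameraType, FisheyeMEICameraMetadata
import numpy as np

meta = FisheyeMEICameraMetadata(
    camera_type=CameraType.CAM_L1,                  # hypothetical fisheye slot
    width=1400,
    height=1400,
    mirror_parameters=2.0,                          # xi
    distortion=np.array([0.01, -0.005, 0.0, 0.0]),  # k1, k2, p1, p2 (cam2image uses only k1, k2)
    projection_parameters=np.array([600.0, 600.0, 700.0, 700.0]),  # gamma1, gamma2, u0, v0
)
u, v, depth = meta.cam2image(np.array([[0.1, 0.0, 1.0]]))  # one camera-frame point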
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 5adda6b7..21a91668 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -60,15 +60,16 @@ def get_wopd_chrysler_pacifica_parameters() -> VehicleParameters:
)
def get_kitti360_station_wagon_parameters() -> VehicleParameters:
- #TODO except wheel_base, all need to be checked
+ #NOTE: Parameters are estimated from the vehicle model.
+ #https://www.cvlibs.net/datasets/kitti-360/documentation.php
return VehicleParameters(
vehicle_name="kitti360_station_wagon",
- width=2.297,
- length=5.176,
+ width=1.800,
+ length=3.500,
height=1.400,
wheel_base=2.710,
rear_axle_to_center_vertical=0.45,
- rear_axle_to_center_longitudinal=1.461,
+ rear_axle_to_center_longitudinal=2.71/2 + 0.05,
)
def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters:
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index cdca86c4..7511cdbc 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -33,23 +33,23 @@
LINE_WIDTH: float = 4.0
# Bounding box config:
-BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh"
+BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines"
# Map config:
-MAP_AVAILABLE: bool = True
+MAP_AVAILABLE: bool = False
# Cameras config:
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
+# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
+VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
CAMERA_SCALE: float = 1.0
# Lidar config:
-LIDAR_AVAILABLE: bool = False
+LIDAR_AVAILABLE: bool = True
LIDAR_TYPES: List[LiDARType] = [
LiDARType.LIDAR_MERGED,
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index 2429f56f..d9e5e664 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -34,7 +34,7 @@
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
"carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
# "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor",
- # "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]),
+ "kitti360": Path(os.environ["KITTI360_DATA_ROOT"]),
}
@@ -155,6 +155,10 @@ def get_lidar_from_arrow_table(
lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata)
elif log_metadata.dataset == "wopd":
raise NotImplementedError
+ elif log_metadata.dataset == "kitti360":
+ from d123.dataset.dataset_specific.kitti_360.load_sensor import load_kitti360_lidar_from_path
+
+ lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata)
else:
raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.")
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 81057042..77f3fff0 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -1,6 +1,8 @@
import gc
import json
import os
+import re
+import yaml
from dataclasses import asdict
from functools import partial
from pathlib import Path
@@ -18,7 +20,7 @@
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex
from d123.common.datatypes.time.time_point import TimePoint
@@ -30,18 +32,18 @@
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label
KITTI360_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = True
KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
-#TODO cameraType
KITTI360_CAMERA_TYPES = {
- CameraType.CAM_L0: "image_00",
- CameraType.CAM_R0: "image_01",
- # TODO fisheye camera
+ CameraType.CAM_STEREO_L: "image_00",
+ CameraType.CAM_STEREO_R: "image_01",
+    # TODO: needs refactoring to support fisheye cameras
# CameraType.CAM_L1: "image_02",
# CameraType.CAM_R1: "image_03",
}
@@ -71,31 +73,6 @@
DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train",
}
-#TODO only a subset of the labels is used for now
-KIITI360_DETECTION_NAME_DICT = {
- "truck": DetectionType.VEHICLE,
- "bus": DetectionType.VEHICLE,
- "car": DetectionType.VEHICLE,
- "motorcycle": DetectionType.BICYCLE,
- "bicycle": DetectionType.BICYCLE,
- "pedestrian": DetectionType.PEDESTRIAN,
-}
-
-KITTI3602NUPLAN_IMU_CALIBRATION = np.array([
- [1, 0, 0, 0],
- [0, -1, 0, 0],
- [0, 0, -1, 0],
- [0, 0, 0, 1],
- ], dtype=np.float64)
-
-KITTI3602NUPLAN_LIDAR_CALIBRATION = np.array([
- [0, -1, 0, 0],
- [1, 0, 0, 0],
- [0, 0, 1, 0],
- [0, 0, 0, 1],
- ], dtype=np.float64)
-
-
def create_token(input_data: str) -> str:
# TODO: Refactor this function.
# TODO: Add a general function to create tokens from arbitrary data.
@@ -266,12 +243,12 @@ def convert_kitti360_log_to_arrow(
return []
-def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]:
+def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]:
persp = PATH_CALIB_ROOT / "perspective.txt"
assert persp.exists()
- result = {"image_00": {}, "image_01": {}}
+ persp_result = {"image_00": {}, "image_01": {}}
with open(persp, "r") as f:
lines = [ln.strip() for ln in f if ln.strip()]
@@ -279,21 +256,39 @@ def get_kitti360_camera_metadata() -> Dict[CameraType, CameraMetadata]:
key, value = ln.split(" ", 1)
cam_id = key.split("_")[-1][:2]
if key.startswith("P_rect_"):
- result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln)
+ persp_result[f"image_{cam_id}"]["intrinsic"] = _read_projection_matrix(ln)
elif key.startswith("S_rect_"):
- result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()]
+ persp_result[f"image_{cam_id}"]["wh"] = [int(round(float(x))) for x in value.split()]
elif key.startswith("D_"):
- result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()]
+ persp_result[f"image_{cam_id}"]["distortion"] = [float(x) for x in value.split()]
+
+ fisheye_camera02_path = PATH_CALIB_ROOT / "image_02.yaml"
+ fisheye_camera03_path = PATH_CALIB_ROOT / "image_03.yaml"
+ assert fisheye_camera02_path.exists() and fisheye_camera03_path.exists()
+ fisheye02 = _readYAMLFile(fisheye_camera02_path)
+ fisheye03 = _readYAMLFile(fisheye_camera03_path)
+ fisheye_result = {"image_02": fisheye02, "image_03": fisheye03}
- log_cam_infos: Dict[str, CameraMetadata] = {}
+ log_cam_infos: Dict[str, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {}
for cam_type, cam_name in KITTI360_CAMERA_TYPES.items():
- log_cam_infos[cam_type] = CameraMetadata(
- camera_type=cam_type,
- width=result[cam_name]["wh"][0],
- height=result[cam_name]["wh"][1],
- intrinsic=np.array(result[cam_name]["intrinsic"]),
- distortion=np.array(result[cam_name]["distortion"]),
- )
+ if cam_name in ["image_00", "image_01"]:
+ log_cam_infos[cam_type] = CameraMetadata(
+ camera_type=cam_type,
+ width=persp_result[cam_name]["wh"][0],
+ height=persp_result[cam_name]["wh"][1],
+ intrinsic=np.array(persp_result[cam_name]["intrinsic"]),
+ distortion=np.array(persp_result[cam_name]["distortion"]),
+ )
+ elif cam_name in ["image_02","image_03"]:
+ log_cam_infos[cam_type] = FisheyeMEICameraMetadata(
+ camera_type=cam_type,
+ width=fisheye_result[cam_name]["image_width"],
+ height=fisheye_result[cam_name]["image_height"],
+ mirror_parameters=fisheye_result[cam_name]["mirror_parameters"],
+ distortion=np.array(fisheye_result[cam_name]["distortion_parameters"]),
+ projection_parameters= np.array(fisheye_result[cam_name]["projection_parameters"]),
+ )
+
return log_cam_infos
def _read_projection_matrix(p_line: str) -> np.ndarray:
@@ -305,6 +300,19 @@ def _read_projection_matrix(p_line: str) -> np.ndarray:
K = P[:, :3]
return K
+def _readYAMLFile(fileName):
+    '''Make an OpenCV-style YAML file parseable by Python's yaml module.'''
+    ret = {}
+    skip_lines = 1  # Skip the first line, which says "%YAML:1.0" (or replace it with "%YAML 1.0").
+    with open(fileName) as fin:
+        for i in range(skip_lines):
+            fin.readline()
+        yamlFileOut = fin.read()
+        myRe = re.compile(r":([^ ])")  # Add a space after ":" if missing; required by Python yaml.
+        yamlFileOut = myRe.sub(r': \1', yamlFileOut)
+        ret = yaml.safe_load(yamlFileOut)
+    return ret
+
def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
metadata: Dict[LiDARType, LiDARMetadata] = {}
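(Aside, not part of the patch.) What the regex in `_readYAMLFile` actually does, on a one-line sample: OpenCV writes `key:value` with no space after the colon, which PyYAML rejects, so a space is inserted after every bare colon:

import re

my_re = re.compile(r":([^ ])")
print(my_re.sub(r": \1", "image_width:1400"))  # image_width: 1400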
@@ -326,9 +334,7 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
- cam2velo = KITTI3602NUPLAN_LIDAR_CALIBRATION @ cam2velo
-
- extrinsic = cam2velo @ np.linalg.inv(cam2pose)
+ extrinsic = cam2pose @ np.linalg.inv(cam2velo)
metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
lidar_type=LiDARType.LIDAR_TOP,
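(Aside, not part of the patch.) The corrected extrinsic above chains the two calibration files: `inv(T_cam_to_velo)` is `T_velo_to_cam`, so `T_cam_to_pose @ inv(T_cam_to_velo)` maps LiDAR points into the pose (IMU) frame. A sketch with identity placeholders instead of the real `calib_cam_to_pose.txt` / `calib_cam_to_velo.txt` values:

import numpy as np

T_cam_to_pose = np.eye(4)  # placeholder for calib_cam_to_pose.txt
T_cam_to_velo = np.eye(4)  # placeholder for calib_cam_to_velo.txt
T_velo_to_pose = T_cam_to_pose @ np.linalg.inv(T_cam_to_velo)  # velo -> cam -> pose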
@@ -449,14 +455,14 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
oxts_data = np.loadtxt(oxts_path_file)
- #TODO check roll, pitch, yaw
+ #TODO check roll, pitch, yaw again
roll, pitch, yaw = oxts_data[3:6]
vehicle_parameters = get_kitti360_station_wagon_parameters()
- while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] <= idx:
+ while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
pose_idx += 1
pos = pose_idx
- # pos = np.searchsorted(poses_time, idx, side='right') - 1
+        # pos = np.searchsorted(poses_time, idx, side='right') - 1
rear_axle_pose = StateSE3(
x=poses[pos, 4],
@@ -527,8 +533,9 @@ def _extract_detections(
# lidra_data_all.append(lidar_data)
for child in root:
- label = child.find('label').text
- if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+ semanticIdKITTI = int(child.find('semanticId').text)
+ name = kittiId2label[semanticIdKITTI].name
+ if child.find('transform') is None or name not in KIITI360_DETECTION_NAME_DICT.keys():
continue
obj = KITTI360Bbox3D()
obj.parseBbox(child)
@@ -546,7 +553,7 @@ def _extract_detections(
detections_states[frame].append(obj.get_state_array())
detections_velocity[frame].append([0.0, 0.0, 0.0])
detections_tokens[frame].append(str(obj.globalID))
- detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
+ detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))
else:
ann_id = obj.annotationId
dynamic_groups[ann_id].append(obj)
@@ -583,7 +590,7 @@ def _extract_detections(
detections_states[frame].append(obj.get_state_array())
detections_velocity[frame].append(vel)
detections_tokens[frame].append(str(obj.globalID))
- detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.label]))
+ detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))
return detections_states, detections_velocity, detections_tokens, detections_types
@@ -593,7 +600,7 @@ def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverter
lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
if lidar_full_path.exists():
if data_converter_config.lidar_store_option == "path":
- lidar = f"/data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
+ lidar = f"data_3d_raw/{log_name}/velodyne_points/data/{idx:010d}.bin"
elif data_converter_config.lidar_store_option == "binary":
raise NotImplementedError("Binary lidar storage is not implemented.")
else:
@@ -606,9 +613,12 @@ def _extract_cameras(
camera_dict: Dict[str, Union[str, bytes]] = {}
for camera_type, cam_dir_name in KITTI360_CAMERA_TYPES.items():
- img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png"
+ if cam_dir_name in ["image_00", "image_01"]:
+ img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rect" / f"{idx:010d}.png"
+ elif cam_dir_name in ["image_02", "image_03"]:
+ img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png"
+
if img_path_png.exists():
-
cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
if not cam2pose_txt.exists():
raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index d4622867..5c69264f 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -7,11 +7,33 @@
from d123.common.geometry.base import StateSE3
from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.common.geometry.transform.se3 import get_rotation_matrix
from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
DEFAULT_ROLL = 0.0
DEFAULT_PITCH = 0.0
+additional_calibration = get_rotation_matrix(
+ StateSE3(
+ x=0.0,
+ y=0.0,
+ z=0.0,
+ roll=np.deg2rad(1.0),
+ pitch=np.deg2rad(1.0),
+ yaw=np.deg2rad(0.0),
+ )
+ )
+
+kitti3602nuplan_imu_calibration_ideal = np.array([
+ [1, 0, 0, 0],
+ [0, -1, 0, 0],
+ [0, 0, -1, 0],
+ [0, 0, 0, 1],
+ ], dtype=np.float64)
+
+KITTI3602NUPLAN_IMU_CALIBRATION = np.eye(4, dtype=np.float64)
+KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] = additional_calibration @ kitti3602nuplan_imu_calibration_ideal[:3, :3]
+
MAX_N = 1000
def local2global(semanticId, instanceId):
globalId = semanticId*MAX_N + instanceId
@@ -99,6 +121,8 @@ def parseVertices(self, child):
def parse_scale_rotation(self):
Rm, Sm = polar(self.R)
+ if np.linalg.det(Rm) < 0:
+ Rm[0] = -Rm[0]
scale = np.diag(Sm)
yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
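(Aside, not part of the patch.) A sketch of why the determinant check above matters: polar decomposition splits the annotation matrix into rotation times symmetric scale, and a negative determinant signals an improper rotation (a reflection) that must be repaired before extracting Euler angles:

import numpy as np
from scipy.linalg import polar
from scipy.spatial.transform import Rotation as R

M = R.from_euler('z', 0.3).as_matrix() @ np.diag([4.2, 1.9, 1.5])  # rotation times scale
Rm, Sm = polar(M)
print(np.diag(Sm))                        # ~[4.2 1.9 1.5]
print(R.from_matrix(Rm).as_euler('zyx'))  # ~[0.3 0.  0. ]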
diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py
index 38f8a91c..de24f152 100644
--- a/d123/dataset/dataset_specific/kitti_360/labels.py
+++ b/d123/dataset/dataset_specific/kitti_360/labels.py
@@ -166,3 +166,43 @@ def assureSingleInstanceName( name ):
return None
# all good then
return name
+
+from d123.common.datatypes.detection.detection_types import DetectionType
+
+KIITI360_DETECTION_NAME_DICT = {
+ "traffic light": DetectionType.SIGN,
+ "traffic sign": DetectionType.SIGN,
+ "person": DetectionType.PEDESTRIAN,
+ "rider": DetectionType.BICYCLE,
+ "car": DetectionType.VEHICLE,
+ "truck": DetectionType.VEHICLE,
+ "bus": DetectionType.VEHICLE,
+ "caravan": DetectionType.VEHICLE,
+ "trailer": DetectionType.VEHICLE,
+ "train": DetectionType.VEHICLE,
+ "motorcycle": DetectionType.BICYCLE,
+ "bicycle": DetectionType.BICYCLE,
+ "stop": DetectionType.SIGN,
+}
+
+# KIITI360_DETECTION_NAME_DICT = {
+# "pole": DetectionType.GENERIC_OBJECT,
+# "traffic light": DetectionType.SIGN,
+# "traffic sign": DetectionType.SIGN,
+# "person": DetectionType.PEDESTRIAN,
+# "rider": DetectionType.BICYCLE,
+# "car": DetectionType.VEHICLE,
+# "truck": DetectionType.VEHICLE,
+# "bus": DetectionType.VEHICLE,
+# "caravan": DetectionType.VEHICLE,
+# "trailer": DetectionType.VEHICLE,
+# "train": DetectionType.VEHICLE,
+# "motorcycle": DetectionType.BICYCLE,
+# "bicycle": DetectionType.BICYCLE,
+# "stop": DetectionType.SIGN,
+# "smallpole": DetectionType.GENERIC_OBJECT,
+# "lamp": DetectionType.GENERIC_OBJECT,
+# "trash bin": DetectionType.GENERIC_OBJECT,
+# "vending machine": DetectionType.GENERIC_OBJECT,
+# "box": DetectionType.GENERIC_OBJECT,
+# }
diff --git a/d123/dataset/dataset_specific/kitti_360/load_sensor.py b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
new file mode 100644
index 00000000..2a23401f
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
@@ -0,0 +1,27 @@
+from pathlib import Path
+
+import numpy as np
+
+from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+
+
+def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
+ assert filepath.exists(), f"LiDAR file not found: {filepath}"
+ pcd = np.fromfile(filepath, dtype=np.float32)
+ pcd = np.reshape(pcd,[-1,4]) # [N,4]
+
+ xyz = pcd[:, :3]
+ intensity = pcd[:, 3]
+
+ ones = np.ones((xyz.shape[0], 1), dtype=pcd.dtype)
+ points_h = np.concatenate([xyz, ones], axis=1) #[N,4]
+
+ transformed_h = lidar_metadata.extrinsic @ points_h.T #[4,N]
+
+ transformed_xyz = transformed_h[:3, :] # (3,N)
+
+ intensity_row = intensity[np.newaxis, :] # (1,N)
+
+ point_cloud_4xN = np.vstack([transformed_xyz, intensity_row]).astype(np.float32) # (4,N)
+
+ return LiDAR(metadata=lidar_metadata, point_cloud=point_cloud_4xN)
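(Aside, not part of the patch.) The loader applies the 4x4 extrinsic to homogeneous points, i.e. each (x, y, z) is rotated and translated while intensity is passed through unchanged; a tiny numeric check:

import numpy as np

E = np.eye(4)
E[:3, 3] = [1.0, 0.0, 0.0]                                   # shift x by 1 m
pts_h = np.array([[0, 0, 0, 1], [1, 2, 3, 1]], dtype=float)  # (N, 4) homogeneous
print((E @ pts_h.T)[:3].T)                                   # [[1. 0. 0.] [2. 2. 3.]]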
diff --git a/jbwang_test2.py b/jbwang_test2.py
index 93d86a11..7128a636 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -117,14 +117,21 @@
x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
CENTER_REF = np.array([x, y, z], dtype=np.float64)
objs_name = []
+label_name = []
for child in root:
label = child.find('label').text
- if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+ # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+ # continue
+
+ if child.find('transform') is None:
continue
+ print("this label is ",label)
+ print("!!!!!!!!!!!!!!!!!!!")
obj = KITTI360Bbox3D()
obj.parseBbox(child)
# obj.parseVertices(child)
name = child.find('label').text
+    label_name.append(name)
# if obj.start_frame < 10030 and obj.end_frame > 10030:
center = np.array(obj.T, dtype=np.float64)
dist = np.linalg.norm(center - CENTER_REF)
@@ -133,6 +140,7 @@
objs_name.append(obj.name)
print(len(objs_name))
print(set(objs_name))
+print(set(label_name))
# print(obj.Rm)
# print(Sigma)
names = []
From 571282886c2ab75b2b8f439627a21978d9100ab8 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sun, 24 Aug 2025 18:06:24 +0200
Subject: [PATCH 011/145] Adding/testing autodocstrings for geometry
documentation.
---
docs/conf.py | 13 +++++++++++++
docs/datasets/nuplan.rst | 2 +-
docs/geometry.rst | 13 +++++++++++++
docs/index.rst | 1 +
pyproject.toml | 1 +
5 files changed, 29 insertions(+), 1 deletion(-)
create mode 100644 docs/geometry.rst
diff --git a/docs/conf.py b/docs/conf.py
index 9d7e8c5f..f114d331 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,6 +20,8 @@
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
+ "sphinx.ext.napoleon",
+ "sphinx_copybutton",
"myst_parser",
]
@@ -66,3 +68,14 @@
"includehidden": True,
"titles_only": False,
}
+
+autodoc_typehints = "both"
+autodoc_class_signature = "separated"
+autodoc_default_options = {
+ "members": True,
+ "member-order": "bysource",
+ "undoc-members": True,
+ "inherited-members": True,
+ "exclude-members": "__init__, __post_init__",
+ "imported-members": True,
+}
diff --git a/docs/datasets/nuplan.rst b/docs/datasets/nuplan.rst
index 065c26a5..c1590f03 100644
--- a/docs/datasets/nuplan.rst
+++ b/docs/datasets/nuplan.rst
@@ -1,5 +1,5 @@
nuPlan
------
+------
.. sidebar:: nuPlan
diff --git a/docs/geometry.rst b/docs/geometry.rst
new file mode 100644
index 00000000..263b7db0
--- /dev/null
+++ b/docs/geometry.rst
@@ -0,0 +1,13 @@
+
+Geometry
+========
+
+.. autoclass:: d123.common.geometry.base.Point2D()
+
+.. autoclass:: d123.common.geometry.base.Point3D()
+
+.. autoclass:: d123.common.geometry.base.StateSE2()
+
+.. autoclass:: d123.common.geometry.base.StateSE3()
+
+.. autoclass:: d123.dataset.maps.abstract_map.AbstractMap()
diff --git a/docs/index.rst b/docs/index.rst
index b169f012..c923847f 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -16,6 +16,7 @@ documentation for details.
:caption: Contents:
installation
+ geometry
datasets/index
schema
visualization
diff --git a/pyproject.toml b/pyproject.toml
index d5419010..ad02829a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -76,6 +76,7 @@ docs = [
"sphinx-rtd-theme",
"sphinx-autobuild",
"myst-parser",
+ "sphinx-copybutton",
]
nuplan = [
"nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2",
From f991513c9f1dc3d02a5da46bd70df733c6446894 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Sun, 24 Aug 2025 19:36:59 +0200
Subject: [PATCH 012/145] Move `geometry` out of `common` (#39)
---
d123/common/datatypes/detection/detection.py | 8 ++++----
d123/common/datatypes/vehicle_state/ego_state.py | 6 +++---
.../vehicle_state/vehicle_parameters.py | 8 ++++----
.../visualization/matplotlib/camera copy.py | 6 +++---
d123/common/visualization/matplotlib/camera.py | 4 ++--
.../visualization/matplotlib/observation.py | 6 +++---
d123/common/visualization/matplotlib/utils.py | 2 +-
d123/common/visualization/viser/utils.py | 10 +++++-----
d123/common/visualization/viser/utils_v2.py | 13 +++++++------
d123/dataset/arrow/conversion.py | 4 ++--
.../conversion/map/opendrive/parser/geometry.py | 2 +-
.../conversion/map/opendrive/parser/reference.py | 2 +-
.../map/opendrive/utils/lane_helper.py | 4 ++--
.../map/opendrive/utils/objects_helper.py | 6 +++---
.../map/road_edge/road_edge_3d_utils.py | 4 ++--
.../dataset_specific/av2/av2_data_converter.py | 10 +++++-----
.../dataset_specific/av2/av2_map_conversion.py | 8 ++++----
.../carla/carla_data_converter.py | 6 +++---
.../nuplan/nuplan_data_converter.py | 8 ++++----
.../wopd/waymo_map_utils/womp_boundary_utils.py | 12 ++++++------
.../wopd/waymo_map_utils/wopd_map_utils.py | 6 +++---
.../dataset_specific/wopd/wopd_data_converter.py | 10 +++++-----
d123/dataset/maps/abstract_map.py | 2 +-
d123/dataset/maps/abstract_map_objects.py | 2 +-
d123/dataset/maps/gpkg/gpkg_map.py | 2 +-
d123/dataset/maps/gpkg/gpkg_map_objects.py | 4 ++--
d123/{common => }/geometry/__init__.py | 0
d123/{common => }/geometry/base.py | 2 +-
d123/{common => }/geometry/base_index.py | 0
.../geometry/bounding_box/__init__.py | 0
.../geometry/bounding_box/bounding_box.py | 6 +++---
.../geometry/bounding_box/bounding_box_index.py | 0
d123/{common => }/geometry/bounding_box/utils.py | 4 ++--
d123/{common => }/geometry/constants.py | 0
d123/{common => }/geometry/line/__init__.py | 0
d123/{common => }/geometry/line/helper.py | 2 +-
d123/{common => }/geometry/line/polylines.py | 8 ++++----
d123/{common => }/geometry/occupancy_map.py | 0
d123/{common => }/geometry/transform/__init__.py | 0
d123/{common => }/geometry/transform/rotation.py | 4 ++--
.../{common => }/geometry/transform/se2_array.py | 4 ++--
d123/{common => }/geometry/transform/se3.py | 4 ++--
.../geometry/transform/tranform_2d.py | 4 ++--
d123/{common => }/geometry/units.py | 0
d123/{common => }/geometry/utils.py | 0
d123/{common => }/geometry/vector.py | 2 +-
.../config/datasets/av2_sensor_dataset.yaml | 4 ++--
.../agents/constant_velocity_agents.py | 6 +++---
d123/simulation/agents/idm_agents.py | 10 +++++-----
d123/simulation/agents/path_following.py | 8 ++++----
d123/simulation/agents/smart_agents.py | 8 ++++----
.../motion_model/kinematic_bicycle_model.py | 4 ++--
.../gym_observation/raster/raster_renderer.py | 8 ++++----
.../gym/environment/helper/environment_area.py | 6 +++---
.../gym/environment/helper/environment_cache.py | 4 ++--
d123/simulation/gym/gym_env.py | 2 +-
.../metrics/sim_agents/interaction_based.py | 4 ++--
d123/simulation/metrics/sim_agents/kinematics.py | 2 +-
d123/simulation/metrics/sim_agents/map_based.py | 8 ++++----
d123/simulation/metrics/sim_agents/sim_agents.py | 2 +-
d123/simulation/metrics/sim_agents/utils.py | 2 +-
.../planner_output/action_planner_output.py | 2 +-
.../feature_builder/smart_feature_builder.py | 8 ++++----
notebooks/av2/delete_me.ipynb | 4 ++--
notebooks/av2/delete_me_map.ipynb | 8 ++++----
notebooks/deprecated/test_scene_builder.ipynb | 4 ++--
notebooks/gym/test_gym.ipynb | 4 ++--
notebooks/scene_rendering.ipynb | 6 +++---
notebooks/smarty/smart_rollout.ipynb | 2 +-
notebooks/viz/bev_matplotlib.ipynb | 14 +++++++-------
notebooks/viz/bev_matplotlib_prediction.ipynb | 2 +-
notebooks/waymo_perception/lidar_testing.ipynb | 4 ++--
notebooks/waymo_perception/map_testing.ipynb | 16 ++++++++--------
notebooks/waymo_perception/testing.ipynb | 4 ++--
74 files changed, 176 insertions(+), 175 deletions(-)
rename d123/{common => }/geometry/__init__.py (100%)
rename d123/{common => }/geometry/base.py (99%)
rename d123/{common => }/geometry/base_index.py (100%)
rename d123/{common => }/geometry/bounding_box/__init__.py (100%)
rename d123/{common => }/geometry/bounding_box/bounding_box.py (91%)
rename d123/{common => }/geometry/bounding_box/bounding_box_index.py (100%)
rename d123/{common => }/geometry/bounding_box/utils.py (93%)
rename d123/{common => }/geometry/constants.py (100%)
rename d123/{common => }/geometry/line/__init__.py (100%)
rename d123/{common => }/geometry/line/helper.py (96%)
rename d123/{common => }/geometry/line/polylines.py (96%)
rename d123/{common => }/geometry/occupancy_map.py (100%)
rename d123/{common => }/geometry/transform/__init__.py (100%)
rename d123/{common => }/geometry/transform/rotation.py (86%)
rename d123/{common => }/geometry/transform/se2_array.py (97%)
rename d123/{common => }/geometry/transform/se3.py (98%)
rename d123/{common => }/geometry/transform/tranform_2d.py (88%)
rename d123/{common => }/geometry/units.py (100%)
rename d123/{common => }/geometry/utils.py (100%)
rename d123/{common => }/geometry/vector.py (96%)
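As the rename list shows, this commit is a pure import-path migration: every `d123.common.geometry.*` module moves to `d123.geometry.*` with contents 86-100% unchanged, and the ~70 touched files only rewrite their imports accordingly. A before/after sketch matching the hunks that follow (the constructor arguments are illustrative assumptions; the actual dataclass fields live in d123/geometry/base.py and d123/geometry/vector.py):

    # before this commit:
    # from d123.common.geometry.base import StateSE2
    # from d123.common.geometry.vector import Vector2D

    # after this commit:
    from d123.geometry.base import StateSE2
    from d123.geometry.vector import Vector2D

    pose = StateSE2(1.0, 2.0, 0.5)  # assumed field order: x, y, yaw
    shift = Vector2D(3.0, 0.0)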
diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py
index a836fd4f..6245892f 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/common/datatypes/detection/detection.py
@@ -6,11 +6,11 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.geometry.base import StateSE2, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.common.geometry.occupancy_map import OccupancyMap2D
-from d123.common.geometry.vector import Vector2D, Vector3D
from d123.common.utils.enums import SerialIntEnum
+from d123.geometry.base import StateSE2, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.vector import Vector2D, Vector3D
@dataclass
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py
index 9ee67593..0fdf236e 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/common/datatypes/vehicle_state/ego_state.py
@@ -22,10 +22,10 @@
rear_axle_se2_to_center_se2,
rear_axle_se3_to_center_se3,
)
-from d123.common.geometry.base import StateSE2, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.common.geometry.vector import Vector2D, Vector3D
from d123.common.utils.enums import classproperty
+from d123.geometry.base import StateSE2, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.vector import Vector2D, Vector3D
# TODO: Find an appropriate way to handle SE2 and SE3 states.
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 3b4f04b9..0cc14b7d 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,7 +1,7 @@
-from d123.common.geometry.base import StateSE2, StateSE3, dataclass
-from d123.common.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2, StateSE3, dataclass
+from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
# TODO: Add parameter sets for more vehicles, and potentially extend the parameter fields.
diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py
index ea758366..b44e387b 100644
--- a/d123/common/visualization/matplotlib/camera copy.py
+++ b/d123/common/visualization/matplotlib/camera copy.py
@@ -14,10 +14,10 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import Camera
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
-from d123.common.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix
# from navsim.common.dataclasses import Annotations, Camera, Lidar
# from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 52d873ae..d8412731 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -14,9 +14,9 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import Camera
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
-from d123.common.geometry.transform.se3 import convert_absolute_to_relative_se3_array
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array
# from navsim.common.dataclasses import Annotations, Camera, Lidar
# from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 7e225cc7..33bea28f 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -7,9 +7,6 @@
from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
-from d123.common.geometry.base import Point2D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
from d123.common.visualization.color.config import PlotConfig
from d123.common.visualization.color.default import (
BOX_DETECTION_CONFIG,
@@ -29,6 +26,9 @@
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.maps.map_datatypes import MapLayer
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.transform.tranform_2d import translate_along_yaw
def add_default_map_on_ax(
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 144530ab..4beff462 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -8,8 +8,8 @@
import shapely.geometry as geom
from matplotlib.path import Path
-from d123.common.geometry.base import StateSE2, StateSE3
from d123.common.visualization.color.config import PlotConfig
+from d123.geometry.base import StateSE2, StateSE3
def add_shapely_polygon_to_ax(
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 561c3676..d03758ff 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -9,16 +9,16 @@
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
from d123.common.datatypes.sensor.camera import Camera, CameraType
from d123.common.datatypes.sensor.lidar import LiDARType
-from d123.common.geometry.base import Point3D, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.line.polylines import Polyline3D
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array
from d123.common.visualization.color.color import TAB_10, Color
from d123.common.visualization.color.config import PlotConfig
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG
from d123.dataset.maps.abstract_map import MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point3D, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.line.polylines import Polyline3D
+from d123.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
@@ -233,7 +233,7 @@ def get_camera_values(
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
# Get the rotation matrix of the rear axle pose
- from d123.common.geometry.transform.se3 import get_rotation_matrix
+ from d123.geometry.transform.se3 import get_rotation_matrix
ego_transform = np.eye(4, dtype=np.float64)
ego_transform[:3, :3] = get_rotation_matrix(rear_axle)
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 5db06ab8..f62a48f1 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -1,16 +1,17 @@
import numpy as np
import numpy.typing as npt
-# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
-from d123.common.geometry.base import Point3D, Point3DIndex
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.bounding_box.bounding_box_index import Corners3DIndex
-from d123.common.geometry.transform.se3 import translate_body_frame
-from d123.common.geometry.vector import Vector3D
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG
from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR
from d123.dataset.scene.abstract_scene import AbstractScene
+# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
+from d123.geometry.base import Point3D, Point3DIndex
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.bounding_box.bounding_box_index import Corners3DIndex
+from d123.geometry.transform.se3 import translate_body_frame
+from d123.geometry.vector import Vector3D
+
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index 68251f82..56b4b33b 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -25,10 +25,10 @@
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.vector import Vector3D
from d123.dataset.logs.log_metadata import LogMetadata
from d123.dataset.maps.abstract_map import List
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
+from d123.geometry.vector import Vector3D
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/dataset/conversion/map/opendrive/parser/geometry.py
index c1f337c0..4aabd57c 100644
--- a/d123/dataset/conversion/map/opendrive/parser/geometry.py
+++ b/d123/dataset/conversion/map/opendrive/parser/geometry.py
@@ -8,7 +8,7 @@
import numpy.typing as npt
from scipy.special import fresnel
-from d123.common.geometry.base import StateSE2Index
+from d123.geometry.base import StateSE2Index
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py
index b4c88dc1..d8ce3b7b 100644
--- a/d123/dataset/conversion/map/opendrive/parser/reference.py
+++ b/d123/dataset/conversion/map/opendrive/parser/reference.py
@@ -9,11 +9,11 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.base import Point3DIndex, StateSE2Index
from d123.dataset.conversion.map.opendrive.parser.elevation import Elevation
from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral
from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width
from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
+from d123.geometry.base import Point3DIndex, StateSE2Index
TOLERANCE: Final[float] = 1e-3
diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
index 79e0f0c1..b6c26131 100644
--- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
@@ -6,8 +6,6 @@
import numpy.typing as npt
import shapely
-from d123.common.geometry.base import StateSE2Index
-from d123.common.geometry.units import kmph_to_mps, mph_to_mps
from d123.dataset.conversion.map.opendrive.parser.lane import Lane, LaneSection
from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
from d123.dataset.conversion.map.opendrive.parser.road import RoadType
@@ -16,6 +14,8 @@
derive_lane_id,
lane_group_id_from_lane_id,
)
+from d123.geometry.base import StateSE2Index
+from d123.geometry.units import kmph_to_mps, mph_to_mps
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
index 3cbb569e..da85ba5e 100644
--- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
@@ -5,11 +5,11 @@
import numpy.typing as npt
import shapely
-from d123.common.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.utils import normalize_angle
from d123.dataset.conversion.map.opendrive.parser.objects import Object
from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
+from d123.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.utils import normalize_angle
# TODO: make naming consistent with group_collections.py
diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
index 93a34526..b2fa42e9 100644
--- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
+++ b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
@@ -9,9 +9,9 @@
import shapely
from shapely.geometry import LineString
-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.occupancy_map import OccupancyMap2D
from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
+from d123.geometry.base import Point3DIndex
+from d123.geometry.occupancy_map import OccupancyMap2D
logger = logging.getLogger(__name__)
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index 0ff84a7e..25b7bec6 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -19,11 +19,6 @@
get_av2_ford_fusion_hybrid_parameters,
rear_axle_se3_to_center_se3,
)
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.dataset.dataset_specific.av2.av2_constants import (
AV2_CAMERA_TYPE_MAPPING,
@@ -39,6 +34,11 @@
from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.vector import Vector3D, Vector3DIndex
def create_token(input_data: str) -> str:
diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
index 390cbdb2..7fa53fad 100644
--- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py
+++ b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
@@ -1,3 +1,4 @@
+import json
import warnings
from pathlib import Path
from typing import Any, Dict, Final, List
@@ -8,17 +9,16 @@
import pandas as pd
import shapely
import shapely.geometry as geom
-from flask import json
-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline2D, Polyline3D
-from d123.common.geometry.occupancy_map import OccupancyMap2D
from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import (
get_road_edges_3d_from_generic_drivable_area_df,
)
from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline2D, Polyline3D
+from d123.geometry.occupancy_map import OccupancyMap2D
LANE_GROUP_MARK_TYPES: List[str] = [
"DASHED_WHITE",
diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py
index f598bb1e..1e360007 100644
--- a/d123/dataset/dataset_specific/carla/carla_data_converter.py
+++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py
@@ -16,9 +16,6 @@
from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
-from d123.common.geometry.base import Point2D, Point3D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.vector import Vector3DIndex
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.conversion.map.opendrive.opendrive_map_conversion import convert_from_xodr
@@ -27,6 +24,9 @@
from d123.dataset.maps.abstract_map import AbstractMap, MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.scene.arrow_scene import get_map_api_from_names
+from d123.geometry.base import Point2D, Point3D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.vector import Vector3DIndex
AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [
"Town01", # A small, simple town with a river and several bridges.
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index 23a4d702..c65845fb 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -31,15 +31,15 @@
get_nuplan_chrysler_pacifica_parameters,
rear_axle_se3_to_center_se3,
)
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.vector import Vector3D, Vector3DIndex
TARGET_DT: Final[float] = 0.1
NUPLAN_DT: Final[float] = 0.05
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index a2ba1ee2..4c251252 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -5,12 +5,12 @@
import numpy.typing as npt
import shapely.geometry as geom
-from d123.common.geometry.base import Point3D, StateSE2
-from d123.common.geometry.line.polylines import Polyline3D, PolylineSE2
-from d123.common.geometry.occupancy_map import OccupancyMap2D
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.utils import normalize_angle
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import Point3D, StateSE2
+from d123.geometry.line.polylines import Polyline3D, PolylineSE2
+from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.utils import normalize_angle
+from d123.geometry.vector import Vector2D
MAX_LANE_WIDTH = 25.0 # meters
MIN_LANE_WIDTH = 2.0
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
index 6d3ef7ab..20a2be00 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
@@ -9,11 +9,11 @@
import shapely.geometry as geom
from waymo_open_dataset import dataset_pb2
-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline3D
-from d123.common.geometry.units import mph_to_mps
from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline3D
+from d123.geometry.units import mph_to_mps
# TODO:
# - Implement stop signs
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index f1145c8e..77f19ead 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -21,17 +21,17 @@
from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
-from d123.common.geometry.base import Point3D, StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.common.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
+from d123.geometry.base import Point3D, StateSE3
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
+from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.vector import Vector3D, Vector3DIndex
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
diff --git a/d123/dataset/maps/abstract_map.py b/d123/dataset/maps/abstract_map.py
index 681c90cf..f8c547e7 100644
--- a/d123/dataset/maps/abstract_map.py
+++ b/d123/dataset/maps/abstract_map.py
@@ -5,9 +5,9 @@
import shapely
-from d123.common.geometry.base import Point2D
from d123.dataset.maps.abstract_map_objects import AbstractMapObject
from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import Point2D
# TODO:
# - add docstrings
diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/dataset/maps/abstract_map_objects.py
index 96ce79de..a1696009 100644
--- a/d123/dataset/maps/abstract_map_objects.py
+++ b/d123/dataset/maps/abstract_map_objects.py
@@ -6,8 +6,8 @@
import shapely.geometry as geom
import trimesh
-from d123.common.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2
class AbstractMapObject(abc.ABC):
diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/dataset/maps/gpkg/gpkg_map.py
index 32b02d41..f1fead93 100644
--- a/d123/dataset/maps/gpkg/gpkg_map.py
+++ b/d123/dataset/maps/gpkg/gpkg_map.py
@@ -11,7 +11,6 @@
import shapely
import shapely.geometry as geom
-from d123.common.geometry.base import Point2D
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.maps.abstract_map_objects import AbstractMapObject
from d123.dataset.maps.gpkg.gpkg_map_objects import (
@@ -27,6 +26,7 @@
)
from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns
from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import Point2D
USE_ARROW: bool = True
diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/dataset/maps/gpkg/gpkg_map_objects.py
index 1fa6de6a..4641cd97 100644
--- a/d123/dataset/maps/gpkg/gpkg_map_objects.py
+++ b/d123/dataset/maps/gpkg/gpkg_map_objects.py
@@ -10,8 +10,6 @@
import shapely.geometry as geom
import trimesh
-from d123.common.geometry.base import Point3DIndex
-from d123.common.geometry.line.polylines import Polyline3D
from d123.common.visualization.viser.utils import get_trimesh_from_boundaries
from d123.dataset.maps.abstract_map_objects import (
AbstractCarpark,
@@ -28,6 +26,8 @@
)
from d123.dataset.maps.gpkg.utils import get_row_with_value
from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType
+from d123.geometry.base import Point3DIndex
+from d123.geometry.line.polylines import Polyline3D
class GPKGSurfaceObject(AbstractSurfaceMapObject):
diff --git a/d123/common/geometry/__init__.py b/d123/geometry/__init__.py
similarity index 100%
rename from d123/common/geometry/__init__.py
rename to d123/geometry/__init__.py
diff --git a/d123/common/geometry/base.py b/d123/geometry/base.py
similarity index 99%
rename from d123/common/geometry/base.py
rename to d123/geometry/base.py
index 7fdec5cc..b531f499 100644
--- a/d123/common/geometry/base.py
+++ b/d123/geometry/base.py
@@ -8,7 +8,7 @@
import numpy.typing as npt
import shapely.geometry as geom
-# from d123.common.geometry.transform.se3 import get_rotation_matrix
+# from d123.geometry.transform.se3 import get_rotation_matrix
from d123.common.utils.enums import classproperty
# TODO: Reconsider whether the 2D/3D and SE2/SE3 structures would be better organized hierarchically, e.g. via inheritance or composition.
diff --git a/d123/common/geometry/base_index.py b/d123/geometry/base_index.py
similarity index 100%
rename from d123/common/geometry/base_index.py
rename to d123/geometry/base_index.py
diff --git a/d123/common/geometry/bounding_box/__init__.py b/d123/geometry/bounding_box/__init__.py
similarity index 100%
rename from d123/common/geometry/bounding_box/__init__.py
rename to d123/geometry/bounding_box/__init__.py
diff --git a/d123/common/geometry/bounding_box/bounding_box.py b/d123/geometry/bounding_box/bounding_box.py
similarity index 91%
rename from d123/common/geometry/bounding_box/bounding_box.py
rename to d123/geometry/bounding_box/bounding_box.py
index 625d24d9..ddf7ec15 100644
--- a/d123/common/geometry/bounding_box/bounding_box.py
+++ b/d123/geometry/bounding_box/bounding_box.py
@@ -7,9 +7,9 @@
import numpy.typing as npt
import shapely
-from d123.common.geometry.base import StateSE2, StateSE3
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index
-from d123.common.geometry.bounding_box.utils import bbse2_array_to_corners_array
+from d123.geometry.base import StateSE2, StateSE3
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index
+from d123.geometry.bounding_box.utils import bbse2_array_to_corners_array
# TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach?
diff --git a/d123/common/geometry/bounding_box/bounding_box_index.py b/d123/geometry/bounding_box/bounding_box_index.py
similarity index 100%
rename from d123/common/geometry/bounding_box/bounding_box_index.py
rename to d123/geometry/bounding_box/bounding_box_index.py
diff --git a/d123/common/geometry/bounding_box/utils.py b/d123/geometry/bounding_box/utils.py
similarity index 93%
rename from d123/common/geometry/bounding_box/utils.py
rename to d123/geometry/bounding_box/utils.py
index df205565..5da3530f 100644
--- a/d123/common/geometry/bounding_box/utils.py
+++ b/d123/geometry/bounding_box/utils.py
@@ -2,8 +2,8 @@
import numpy.typing as npt
import shapely
-from d123.common.geometry.base import Point2DIndex
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex
+from d123.geometry.base import Point2DIndex
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex
def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
diff --git a/d123/common/geometry/constants.py b/d123/geometry/constants.py
similarity index 100%
rename from d123/common/geometry/constants.py
rename to d123/geometry/constants.py
diff --git a/d123/common/geometry/line/__init__.py b/d123/geometry/line/__init__.py
similarity index 100%
rename from d123/common/geometry/line/__init__.py
rename to d123/geometry/line/__init__.py
diff --git a/d123/common/geometry/line/helper.py b/d123/geometry/line/helper.py
similarity index 96%
rename from d123/common/geometry/line/helper.py
rename to d123/geometry/line/helper.py
index 99036004..c3fc0f1a 100644
--- a/d123/common/geometry/line/helper.py
+++ b/d123/geometry/line/helper.py
@@ -2,7 +2,7 @@
import numpy.typing as npt
from shapely.geometry import LineString
-from d123.common.geometry.base import Point2DIndex, StateSE2Index
+from d123.geometry.base import Point2DIndex, StateSE2Index
def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]:
diff --git a/d123/common/geometry/line/polylines.py b/d123/geometry/line/polylines.py
similarity index 96%
rename from d123/common/geometry/line/polylines.py
rename to d123/geometry/line/polylines.py
index 37b799b5..78a417cd 100644
--- a/d123/common/geometry/line/polylines.py
+++ b/d123/geometry/line/polylines.py
@@ -9,10 +9,10 @@
import shapely.geometry as geom
from scipy.interpolate import interp1d
-from d123.common.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index
-from d123.common.geometry.constants import DEFAULT_Z
-from d123.common.geometry.line.helper import get_linestring_yaws, get_path_progress
-from d123.common.geometry.utils import normalize_angle
+from d123.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index
+from d123.geometry.constants import DEFAULT_Z
+from d123.geometry.line.helper import get_linestring_yaws, get_path_progress
+from d123.geometry.utils import normalize_angle
# TODO: Implement PolylineSE3
# TODO: Benchmark interpolation performance and reconsider reliance on LineString
diff --git a/d123/common/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
similarity index 100%
rename from d123/common/geometry/occupancy_map.py
rename to d123/geometry/occupancy_map.py
diff --git a/d123/common/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py
similarity index 100%
rename from d123/common/geometry/transform/__init__.py
rename to d123/geometry/transform/__init__.py
diff --git a/d123/common/geometry/transform/rotation.py b/d123/geometry/transform/rotation.py
similarity index 86%
rename from d123/common/geometry/transform/rotation.py
rename to d123/geometry/transform/rotation.py
index 03072609..a99a2fc5 100644
--- a/d123/common/geometry/transform/rotation.py
+++ b/d123/geometry/transform/rotation.py
@@ -1,8 +1,8 @@
# import numpy as np
# import numpy.typing as npt
-# from d123.common.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-# from d123.common.geometry.vector import Vector3D
+# from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
+# from d123.geometry.vector import Vector3D
# def get_roll_pitch_yaw_from_rotation_matrix(
diff --git a/d123/common/geometry/transform/se2_array.py b/d123/geometry/transform/se2_array.py
similarity index 97%
rename from d123/common/geometry/transform/se2_array.py
rename to d123/geometry/transform/se2_array.py
index c225a325..5112ea34 100644
--- a/d123/common/geometry/transform/se2_array.py
+++ b/d123/geometry/transform/se2_array.py
@@ -3,8 +3,8 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.line.polylines import normalize_angle
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.line.polylines import normalize_angle
# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
diff --git a/d123/common/geometry/transform/se3.py b/d123/geometry/transform/se3.py
similarity index 98%
rename from d123/common/geometry/transform/se3.py
rename to d123/geometry/transform/se3.py
index e61451b4..42fae1c2 100644
--- a/d123/common/geometry/transform/se3.py
+++ b/d123/geometry/transform/se3.py
@@ -1,8 +1,8 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-from d123.common.geometry.vector import Vector3D
+from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
+from d123.geometry.vector import Vector3D
# def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
# R_x = np.array(
diff --git a/d123/common/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py
similarity index 88%
rename from d123/common/geometry/transform/tranform_2d.py
rename to d123/geometry/transform/tranform_2d.py
index fa8151bc..18998709 100644
--- a/d123/common/geometry/transform/tranform_2d.py
+++ b/d123/geometry/transform/tranform_2d.py
@@ -1,8 +1,8 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.vector import Vector2D
# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
diff --git a/d123/common/geometry/units.py b/d123/geometry/units.py
similarity index 100%
rename from d123/common/geometry/units.py
rename to d123/geometry/units.py
diff --git a/d123/common/geometry/utils.py b/d123/geometry/utils.py
similarity index 100%
rename from d123/common/geometry/utils.py
rename to d123/geometry/utils.py
diff --git a/d123/common/geometry/vector.py b/d123/geometry/vector.py
similarity index 96%
rename from d123/common/geometry/vector.py
rename to d123/geometry/vector.py
index ef0e4cfa..dad25329 100644
--- a/d123/common/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -5,7 +5,7 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.base import Point2D, Point3D, Point3DIndex
+from d123.geometry.base import Point2D, Point3D, Point3DIndex
class Vector2DIndex(IntEnum):
diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml
index 4f9a95ec..58a64f7a 100644
--- a/d123/script/config/datasets/av2_sensor_dataset.yaml
+++ b/d123/script/config/datasets/av2_sensor_dataset.yaml
@@ -3,7 +3,7 @@ av2_sensor_dataset:
_convert_: 'all'
splits: ["av2-sensor-mini_train"]
- log_path: "/media/nvme1/argoverse"
+ log_path: "/mnt/elements_0/argoverse"
data_converter_config:
_target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
@@ -12,5 +12,5 @@ av2_sensor_dataset:
output_path: ${d123_data_root}
force_log_conversion: ${force_log_conversion}
force_map_conversion: ${force_map_conversion}
- camera_store_option: "binary"
+ camera_store_option: "path"
lidar_store_option: null
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index 75f4e343..b9a4b587 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -3,11 +3,11 @@
from typing import List, Optional
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 4e9159f5..118355a1 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -7,14 +7,14 @@
from shapely.geometry import CAP_STYLE, Polygon
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D, StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
from d123.dataset.arrow.conversion import BoxDetectionWrapper
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D, StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index bc4ec577..357aac41 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -3,12 +3,12 @@
from typing import Dict, List, Optional
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import Point2D, StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import Point2D, StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 18c1b48e..2b20a496 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -6,13 +6,13 @@
from torch_geometric.data import HeteroData
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
-from d123.common.geometry.utils import normalize_angle
from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import StateSE2
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
+from d123.geometry.utils import normalize_angle
from d123.simulation.agents.abstract_agents import AbstractAgents
from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder
from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch
diff --git a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
index 8a4d9802..c1f343da 100644
--- a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
+++ b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
@@ -3,8 +3,8 @@
from d123.common.datatypes.time.time_point import TimeDuration, TimePoint
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.vector import Vector2D
from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel
diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
index c5a4d57a..604626c9 100644
--- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
+++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
@@ -12,10 +12,10 @@
from shapely.affinity import scale as shapely_scale
from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea, RectangleEnvironmentArea
from d123.simulation.gym.environment.helper.environment_cache import BoxDetectionCache, MapCache
diff --git a/d123/simulation/gym/environment/helper/environment_area.py b/d123/simulation/gym/environment/helper/environment_area.py
index 03f87cd1..db247458 100644
--- a/d123/simulation/gym/environment/helper/environment_area.py
+++ b/d123/simulation/gym/environment/helper/environment_area.py
@@ -3,9 +3,9 @@
from shapely import Polygon
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.transform.tranform_2d import translate_along_yaw
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.base import StateSE2
+from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.vector import Vector2D
class AbstractEnvironmentArea(ABC):
diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py
index 55fbea92..70a3dcdf 100644
--- a/d123/simulation/gym/environment/helper/environment_cache.py
+++ b/d123/simulation/gym/environment/helper/environment_cache.py
@@ -14,8 +14,6 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.common.geometry.base import StateSE2
-from d123.common.geometry.occupancy_map import OccupancyMap2D
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.maps.abstract_map_objects import (
AbstractCarpark,
@@ -26,6 +24,8 @@
AbstractStopLine,
)
from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import StateSE2
+from d123.geometry.occupancy_map import OccupancyMap2D
from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea
from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput
diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py
index b1ce0746..96672d16 100644
--- a/d123/simulation/gym/gym_env.py
+++ b/d123/simulation/gym/gym_env.py
@@ -5,9 +5,9 @@
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.vector import Vector2D
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.vector import Vector2D
from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel
from d123.simulation.observation.abstract_observation import AbstractObservation
from d123.simulation.observation.log_replay_observation import LogReplayObservation
diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py
index 76474d55..05bae92e 100644
--- a/d123/simulation/metrics/sim_agents/interaction_based.py
+++ b/d123/simulation/metrics/sim_agents/interaction_based.py
@@ -3,9 +3,9 @@
import numpy as np
import numpy.typing as npt
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.common.geometry.bounding_box.utils import bbse2_array_to_polygon_array
from d123.dataset.arrow.conversion import BoxDetectionWrapper
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.utils import bbse2_array_to_polygon_array
MAX_OBJECT_DISTANCE: Final[float] = 50.0
diff --git a/d123/simulation/metrics/sim_agents/kinematics.py b/d123/simulation/metrics/sim_agents/kinematics.py
index fb599db0..9d032360 100644
--- a/d123/simulation/metrics/sim_agents/kinematics.py
+++ b/d123/simulation/metrics/sim_agents/kinematics.py
@@ -2,7 +2,7 @@
import numpy.typing as npt
from scipy.signal import savgol_filter
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
SECONDS_PER_ITERATION = 0.1
diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py
index dbd9a16d..5342be6e 100644
--- a/d123/simulation/metrics/sim_agents/map_based.py
+++ b/d123/simulation/metrics/sim_agents/map_based.py
@@ -4,13 +4,13 @@
import numpy.typing as npt
import shapely
-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.common.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array
-from d123.common.geometry.utils import normalize_angle
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.maps.map_datatypes import MapLayer
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array
+from d123.geometry.utils import normalize_angle
MAX_LANE_CENTER_DISTANCE: Final[float] = 10.0
diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py
index 94eeed2c..7f9f372e 100644
--- a/d123/simulation/metrics/sim_agents/sim_agents.py
+++ b/d123/simulation/metrics/sim_agents/sim_agents.py
@@ -5,9 +5,9 @@
import numpy.typing as npt
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
from d123.simulation.metrics.sim_agents.histogram_metric import (
BinaryHistogramIntersectionMetric,
HistogramIntersectionMetric,
diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py
index cf40c5a6..2923038a 100644
--- a/d123/simulation/metrics/sim_agents/utils.py
+++ b/d123/simulation/metrics/sim_agents/utils.py
@@ -4,8 +4,8 @@
import numpy.typing as npt
from d123.common.datatypes.detection.detection import BoxDetectionWrapper
-from d123.common.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
def _get_log_agents_array(
diff --git a/d123/simulation/planning/planner_output/action_planner_output.py b/d123/simulation/planning/planner_output/action_planner_output.py
index 0a62ed98..6d1ff54a 100644
--- a/d123/simulation/planning/planner_output/action_planner_output.py
+++ b/d123/simulation/planning/planner_output/action_planner_output.py
@@ -1,5 +1,5 @@
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.common.geometry.vector import Vector2D
+from d123.geometry.vector import Vector2D
from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 829f0498..806350bf 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -7,10 +7,6 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper
from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.geometry.base import StateSE2, StateSE2Index
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.geometry.line.polylines import PolylineSE2
-from d123.common.geometry.transform.se2_array import convert_absolute_to_relative_se2_array
from d123.common.visualization.color.default import TrafficLightStatus
from d123.dataset.maps.abstract_map import MapLayer
from d123.dataset.maps.abstract_map_objects import (
@@ -20,6 +16,10 @@
AbstractLaneGroup,
)
from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.geometry.base import StateSE2, StateSE2Index
+from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array
# TODO: Hide feature builder behind abstraction.
diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb
index e28ef5ef..85226114 100644
--- a/notebooks/av2/delete_me.ipynb
+++ b/notebooks/av2/delete_me.ipynb
@@ -385,8 +385,8 @@
"\n",
"from pyquaternion import Quaternion\n",
"from d123.common.datatypes.detection.detection_types import DetectionType\n",
- "from d123.common.geometry.base import StateSE2\n",
- "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "from d123.geometry.base import StateSE2\n",
+ "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
"from d123.common.visualization.color.config import PlotConfig\n",
"from d123.common.visualization.color.default import BOX_DETECTION_CONFIG\n",
"from d123.common.visualization.matplotlib.utils import add_shapely_polygon_to_ax\n",
diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb
index 03e62838..901c232c 100644
--- a/notebooks/av2/delete_me_map.ipynb
+++ b/notebooks/av2/delete_me_map.ipynb
@@ -58,7 +58,7 @@
"import json\n",
"from typing import Dict, List\n",
"\n",
- "from d123.common.geometry.line.polylines import Polyline3D\n",
+ "from d123.geometry.line.polylines import Polyline3D\n",
"from d123.dataset.dataset_specific.av2.av2_map_conversion import _extract_lane_group_dict\n",
"\n",
"\n",
@@ -122,11 +122,11 @@
"\n",
"import shapely\n",
"\n",
- "from d123.common.geometry.base import Point3DIndex\n",
+ "from d123.geometry.base import Point3DIndex\n",
"import geopandas as gpd\n",
"\n",
- "from d123.common.geometry.line.polylines import Polyline2D\n",
- "from d123.common.geometry.occupancy_map import OccupancyMap2D\n",
+ "from d123.geometry.line.polylines import Polyline2D\n",
+ "from d123.geometry.occupancy_map import OccupancyMap2D\n",
"\n",
"import numpy.typing as npt\n",
"\n",
diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb
index 00de24f9..607cd184 100644
--- a/notebooks/deprecated/test_scene_builder.ipynb
+++ b/notebooks/deprecated/test_scene_builder.ipynb
@@ -72,8 +72,8 @@
"# import matplotlib.pyplot as plt\n",
"# from tqdm import tqdm\n",
"\n",
- "# from d123.common.geometry.base import Point2D, StateSE2\n",
- "# from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "# from d123.geometry.base import Point2D, StateSE2\n",
+ "# from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
"# from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
"# from d123.common.visualization.matplotlib.observation import (\n",
"# add_bounding_box_to_ax,\n",
diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb
index bb6cf7dc..3fbb3a94 100644
--- a/notebooks/gym/test_gym.ipynb
+++ b/notebooks/gym/test_gym.ipynb
@@ -77,8 +77,8 @@
"from tqdm import tqdm\n",
"\n",
"from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2\n",
- "from d123.common.geometry.base import Point2D, StateSE2\n",
- "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "from d123.geometry.base import Point2D, StateSE2\n",
+ "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
"from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
"from d123.common.visualization.matplotlib.observation import (\n",
" add_bounding_box_to_ax,\n",
diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb
index dad53954..0a50c5ed 100644
--- a/notebooks/scene_rendering.ipynb
+++ b/notebooks/scene_rendering.ipynb
@@ -63,9 +63,9 @@
"from typing import Tuple\n",
"from d123.common.datatypes.detection.detection import BoxDetection\n",
"from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n",
- "from d123.common.geometry.base import StateSE2\n",
- "from d123.common.geometry.transform.tranform_2d import translate_along_yaw\n",
- "from d123.common.geometry.vector import Vector2D\n",
+ "from d123.geometry.base import StateSE2\n",
+ "from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
+ "from d123.geometry.vector import Vector2D\n",
"from d123.common.visualization.matplotlib.observation import (\n",
" add_box_detections_to_ax,\n",
" add_default_map_on_ax,\n",
diff --git a/notebooks/smarty/smart_rollout.ipynb b/notebooks/smarty/smart_rollout.ipynb
index 3f3aac67..f8f71593 100644
--- a/notebooks/smarty/smart_rollout.ipynb
+++ b/notebooks/smarty/smart_rollout.ipynb
@@ -135,7 +135,7 @@
"source": [
"from matplotlib import pyplot as plt\n",
"\n",
- "from d123.common.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array\n",
+ "from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array\n",
"\n",
"\n",
"origin = scene.get_ego_state_at_iteration(0).bounding_box.center.state_se2\n",
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 1feef159..3755c860 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -63,7 +63,7 @@
"from typing import List, Optional, Tuple\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
- "from d123.common.geometry.base import Point2D\n",
+ "from d123.geometry.base import Point2D\n",
"from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
"from d123.common.visualization.color.config import PlotConfig\n",
"from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
@@ -238,14 +238,14 @@
"\n",
"scene_index = 9\n",
"iteration = 99\n",
- "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=30)\n",
+ "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
"plt.show()\n",
"\n",
- "# camera = scenes[scene_index].get_camera_at_iteration(\n",
- "# iteration=iteration, camera_type=CameraType.CAM_F0\n",
- "# )\n",
+ "camera = scenes[scene_index].get_camera_at_iteration(\n",
+ " iteration=iteration, camera_type=CameraType.CAM_F0\n",
+ ")\n",
"\n",
- "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
+ "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
"# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n",
"\n",
"# scenes[scene_index].log_name"
@@ -333,7 +333,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "d123",
+ "display_name": "d123_dev",
"language": "python",
"name": "python3"
},
diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb
index 041daa72..bed72f71 100644
--- a/notebooks/viz/bev_matplotlib_prediction.ipynb
+++ b/notebooks/viz/bev_matplotlib_prediction.ipynb
@@ -64,7 +64,7 @@
"from typing import List, Optional, Tuple\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
- "from d123.common.geometry.base import Point2D\n",
+ "from d123.geometry.base import Point2D\n",
"from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
"from d123.common.visualization.color.config import PlotConfig\n",
"from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb
index 3cd2a77e..dfa0e65e 100644
--- a/notebooks/waymo_perception/lidar_testing.ipynb
+++ b/notebooks/waymo_perception/lidar_testing.ipynb
@@ -54,8 +54,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.common.geometry.base import StateSE3\n",
- "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry.base import StateSE3\n",
+ "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb
index c29fc212..a06d8f7c 100644
--- a/notebooks/waymo_perception/map_testing.ipynb
+++ b/notebooks/waymo_perception/map_testing.ipynb
@@ -54,8 +54,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.common.geometry.base import StateSE3\n",
- "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry.base import StateSE3\n",
+ "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
@@ -139,7 +139,7 @@
"source": [
"from collections import defaultdict\n",
"\n",
- "from d123.common.geometry.units import mph_to_mps\n",
+ "from d123.geometry.units import mph_to_mps\n",
"\n",
"\n",
"dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n",
@@ -382,7 +382,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.common.geometry.line.polylines import Polyline3D\n",
+ "from d123.geometry.line.polylines import Polyline3D\n",
"import numpy as np\n",
"\n",
"\n",
@@ -635,10 +635,10 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.common.geometry.base import StateSE2\n",
- "# from d123.common.geometry.line.polylines import PolylineSE2\n",
- "# from d123.common.geometry.transform.tranform_2d import translate_along_yaw\n",
- "# from d123.common.geometry.vector import Vector2D\n",
+ "from d123.geometry.base import StateSE2\n",
+ "# from d123.geometry.line.polylines import PolylineSE2\n",
+ "# from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
+ "# from d123.geometry.vector import Vector2D\n",
"\n",
"# size = 30\n",
"# fig, ax = plt.subplots(figsize=(size, size))\n",
diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb
index 19745f8f..9adc8510 100644
--- a/notebooks/waymo_perception/testing.ipynb
+++ b/notebooks/waymo_perception/testing.ipynb
@@ -51,8 +51,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.common.geometry.base import StateSE3\n",
- "from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry.base import StateSE3\n",
+ "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
From 2580087ea96ded2f40181f725035660a3a160b53 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Sun, 24 Aug 2025 20:41:11 +0200
Subject: [PATCH 013/145] Clean up `geometry` structure and import options
---
.pre-commit-config.yaml | 1 +
d123/common/datatypes/detection/detection.py | 5 +-
.../datatypes/vehicle_state/ego_state.py | 4 +-
.../vehicle_state/vehicle_parameters.py | 5 +-
.../visualization/matplotlib/camera copy.py | 3 +-
.../common/visualization/matplotlib/camera.py | 2 +-
.../visualization/matplotlib/observation.py | 3 +-
d123/common/visualization/matplotlib/utils.py | 2 +-
d123/common/visualization/viser/utils.py | 4 +-
d123/common/visualization/viser/utils_v2.py | 5 +-
d123/dataset/arrow/conversion.py | 3 +-
.../map/opendrive/parser/geometry.py | 2 +-
.../map/opendrive/parser/reference.py | 2 +-
.../map/opendrive/utils/lane_helper.py | 4 +-
.../map/opendrive/utils/objects_helper.py | 4 +-
.../map/road_edge/road_edge_3d_utils.py | 2 +-
.../av2/av2_data_converter.py | 6 +-
.../av2/av2_map_conversion.py | 4 +-
.../carla/carla_data_converter.py | 4 +-
.../nuplan/nuplan_data_converter.py | 6 +-
.../waymo_map_utils/womp_boundary_utils.py | 7 +-
.../wopd/waymo_map_utils/wopd_map_utils.py | 5 +-
.../wopd/wopd_data_converter.py | 6 +-
d123/dataset/maps/abstract_map.py | 2 +-
d123/dataset/maps/abstract_map_objects.py | 2 +-
d123/dataset/maps/gpkg/gpkg_map.py | 2 +-
d123/dataset/maps/gpkg/gpkg_map_objects.py | 3 +-
d123/geometry/__init__.py | 18 +++
d123/geometry/base_index.py | 1 -
.../{bounding_box => }/bounding_box.py | 6 +-
...ounding_box_index.py => geometry_index.py} | 63 ++++++++
d123/geometry/line/__init__.py | 0
d123/geometry/point.py | 98 ++++++++++++
.../{line/polylines.py => polyline.py} | 9 +-
d123/geometry/{base.py => se.py} | 147 +-----------------
d123/geometry/transform/se2_array.py | 4 +-
d123/geometry/transform/se3.py | 3 +-
d123/geometry/transform/tranform_2d.py | 2 +-
.../{bounding_box => utils}/__init__.py | 0
d123/geometry/utils/bounding_box_utils.py | 63 ++++++++
d123/geometry/{ => utils}/constants.py | 0
.../helper.py => utils/polyline_utils.py} | 2 +-
.../{utils.py => utils/rotation_utils.py} | 0
d123/geometry/{ => utils}/units.py | 0
.../geometry/{bounding_box => utils}/utils.py | 3 +-
d123/geometry/vector.py | 15 +-
.../agents/constant_velocity_agents.py | 4 +-
d123/simulation/agents/idm_agents.py | 7 +-
d123/simulation/agents/path_following.py | 7 +-
d123/simulation/agents/smart_agents.py | 6 +-
.../motion_model/kinematic_bicycle_model.py | 2 +-
.../gym_observation/raster/raster_renderer.py | 2 +-
.../environment/helper/environment_area.py | 2 +-
.../environment/helper/environment_cache.py | 2 +-
.../metrics/sim_agents/interaction_based.py | 4 +-
.../metrics/sim_agents/map_based.py | 8 +-
.../metrics/sim_agents/sim_agents.py | 2 +-
d123/simulation/metrics/sim_agents/utils.py | 2 +-
.../feature_builder/smart_feature_builder.py | 5 +-
notebooks/av2/delete_me.ipynb | 2 +-
notebooks/deprecated/test_scene_builder.ipynb | 4 +-
notebooks/gym/test_gym.ipynb | 2 +-
notebooks/scene_rendering.ipynb | 2 +-
notebooks/viz/bev_matplotlib.ipynb | 14 +-
.../waymo_perception/lidar_testing.ipynb | 4 +-
notebooks/waymo_perception/map_testing.ipynb | 80 +---------
notebooks/waymo_perception/testing.ipynb | 4 +-
67 files changed, 342 insertions(+), 355 deletions(-)
delete mode 100644 d123/geometry/base_index.py
rename d123/geometry/{bounding_box => }/bounding_box.py (92%)
rename d123/geometry/{bounding_box/bounding_box_index.py => geometry_index.py} (59%)
delete mode 100644 d123/geometry/line/__init__.py
create mode 100644 d123/geometry/point.py
rename d123/geometry/{line/polylines.py => polyline.py} (95%)
rename d123/geometry/{base.py => se.py} (53%)
rename d123/geometry/{bounding_box => utils}/__init__.py (100%)
create mode 100644 d123/geometry/utils/bounding_box_utils.py
rename d123/geometry/{ => utils}/constants.py (100%)
rename d123/geometry/{line/helper.py => utils/polyline_utils.py} (96%)
rename d123/geometry/{utils.py => utils/rotation_utils.py} (100%)
rename d123/geometry/{ => utils}/units.py (100%)
rename d123/geometry/{bounding_box => utils}/utils.py (94%)
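For reviewers, the import migration below follows a single pattern throughout. A minimal sketch, assuming a hypothetical call site (all paths are taken from the diffs in this patch):

    # Before this patch:
    from d123.geometry.base import Point2D, StateSE2
    from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
    from d123.geometry.line.polylines import PolylineSE2

    # After this patch, import from the flattened modules...
    from d123.geometry.point import Point2D
    from d123.geometry.se import StateSE2
    from d123.geometry.bounding_box import BoundingBoxSE2
    from d123.geometry.polyline import PolylineSE2

    # ...or from the new aggregated package root:
    from d123.geometry import BoundingBoxSE2, Point2D, PolylineSE2, StateSE2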
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 983998e5..251ae7fb 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -36,6 +36,7 @@ repos:
hooks:
- id: autoflake
args: ['--in-place', '--remove-all-unused-imports', '--remove-unused-variable']
+ exclude: __init__.py$
language_version: python3.12
- repo: https://github.com/pycqa/flake8
rev: 7.3.0
diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py
index 6245892f..075129b4 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/common/datatypes/detection/detection.py
@@ -7,10 +7,7 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.utils.enums import SerialIntEnum
-from d123.geometry.base import StateSE2, StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.geometry.occupancy_map import OccupancyMap2D
-from d123.geometry.vector import Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D
@dataclass
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py
index 0fdf236e..d0487e60 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/common/datatypes/vehicle_state/ego_state.py
@@ -23,9 +23,7 @@
rear_axle_se3_to_center_se3,
)
from d123.common.utils.enums import classproperty
-from d123.geometry.base import StateSE2, StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.geometry.vector import Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D
# TODO: Find an appropriate way to handle SE2 and SE3 states.
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 0cc14b7d..c8a84828 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,7 +1,8 @@
-from d123.geometry.base import StateSE2, StateSE3, dataclass
+from dataclasses import dataclass
+
+from d123.geometry import StateSE2, StateSE3, Vector2D
from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z
from d123.geometry.transform.tranform_2d import translate_along_yaw
-from d123.geometry.vector import Vector2D
# TODO: Add more vehicle parameters, potentially extend the parameters.
diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py
index b44e387b..ed3a7d2a 100644
--- a/d123/common/visualization/matplotlib/camera copy.py
+++ b/d123/common/visualization/matplotlib/camera copy.py
@@ -15,8 +15,7 @@
from d123.common.datatypes.sensor.camera import Camera
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
-from d123.geometry.base import StateSE3
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry import BoundingBoxSE3Index, Corners3DIndex, StateSE3
from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix
# from navsim.common.dataclasses import Annotations, Camera, Lidar
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index d8412731..2b8ecce4 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -15,7 +15,7 @@
from d123.common.datatypes.sensor.camera import Camera
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE3Index, Corners3DIndex
+from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array
# from navsim.common.dataclasses import Annotations, Camera, Lidar
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 33bea28f..ecddaa02 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -26,8 +26,7 @@
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.maps.map_datatypes import MapLayer
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import Point2D
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
from d123.geometry.transform.tranform_2d import translate_along_yaw
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 4beff462..9e030b80 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -9,7 +9,7 @@
from matplotlib.path import Path
from d123.common.visualization.color.config import PlotConfig
-from d123.geometry.base import StateSE2, StateSE3
+from d123.geometry import StateSE2, StateSE3
def add_shapely_polygon_to_ax(
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index d03758ff..afa5ea2e 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -15,9 +15,7 @@
from d123.dataset.maps.abstract_map import MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import Point3D, StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.geometry.line.polylines import Polyline3D
+from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
from d123.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index f62a48f1..44831f3e 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -6,11 +6,8 @@
from d123.dataset.scene.abstract_scene import AbstractScene
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
-from d123.geometry.base import Point3D, Point3DIndex
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.geometry.bounding_box.bounding_box_index import Corners3DIndex
+from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
from d123.geometry.transform.se3 import translate_body_frame
-from d123.geometry.vector import Vector3D
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index 56b4b33b..ab078545 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -27,8 +27,7 @@
from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
from d123.dataset.logs.log_metadata import LogMetadata
from d123.dataset.maps.abstract_map import List
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.geometry.vector import Vector3D
+from d123.geometry import BoundingBoxSE3, Vector3D
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/dataset/conversion/map/opendrive/parser/geometry.py
index 4aabd57c..dc782a7a 100644
--- a/d123/dataset/conversion/map/opendrive/parser/geometry.py
+++ b/d123/dataset/conversion/map/opendrive/parser/geometry.py
@@ -8,7 +8,7 @@
import numpy.typing as npt
from scipy.special import fresnel
-from d123.geometry.base import StateSE2Index
+from d123.geometry import StateSE2Index
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py
index d8ce3b7b..5fe9211b 100644
--- a/d123/dataset/conversion/map/opendrive/parser/reference.py
+++ b/d123/dataset/conversion/map/opendrive/parser/reference.py
@@ -13,7 +13,7 @@
from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral
from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width
from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
-from d123.geometry.base import Point3DIndex, StateSE2Index
+from d123.geometry import Point3DIndex, StateSE2Index
TOLERANCE: Final[float] = 1e-3
diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
index b6c26131..edd05423 100644
--- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
@@ -14,8 +14,8 @@
derive_lane_id,
lane_group_id_from_lane_id,
)
-from d123.geometry.base import StateSE2Index
-from d123.geometry.units import kmph_to_mps, mph_to_mps
+from d123.geometry import StateSE2Index
+from d123.geometry.utils.units import kmph_to_mps, mph_to_mps
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
index da85ba5e..11c7f609 100644
--- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
@@ -7,9 +7,9 @@
from d123.dataset.conversion.map.opendrive.parser.objects import Object
from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
-from d123.geometry.base import Point2D, Point3D, Point3DIndex, StateSE2
+from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2
from d123.geometry.transform.tranform_2d import translate_along_yaw
-from d123.geometry.utils import normalize_angle
+from d123.geometry.utils.rotation_utils import normalize_angle
# TODO: make naming consistent with group_collections.py
diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
index b2fa42e9..b88a44e0 100644
--- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
+++ b/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
@@ -10,7 +10,7 @@
from shapely.geometry import LineString
from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
-from d123.geometry.base import Point3DIndex
+from d123.geometry import Point3DIndex
from d123.geometry.occupancy_map import OccupancyMap2D
logger = logging.getLogger(__name__)
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index 25b7bec6..d57b4e52 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -34,11 +34,9 @@
from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry.base import StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.geometry.vector import Vector3D, Vector3DIndex
+from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
def create_token(input_data: str) -> str:
diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
index 7fa53fad..daadcd3b 100644
--- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py
+++ b/d123/dataset/dataset_specific/av2/av2_map_conversion.py
@@ -16,9 +16,7 @@
)
from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType
-from d123.geometry.base import Point3DIndex
-from d123.geometry.line.polylines import Polyline2D, Polyline3D
-from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D
LANE_GROUP_MARK_TYPES: List[str] = [
"DASHED_WHITE",
diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py
index 1e360007..c6ce3622 100644
--- a/d123/dataset/dataset_specific/carla/carla_data_converter.py
+++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py
@@ -24,9 +24,7 @@
from d123.dataset.maps.abstract_map import AbstractMap, MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.scene.arrow_scene import get_map_api_from_names
-from d123.geometry.base import Point2D, Point3D
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.geometry.vector import Vector3DIndex
+from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex
AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [
"Town01", # A small, simple town with a river and several bridges.
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index c65845fb..f57a5f5e 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -36,10 +36,8 @@
from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry.base import StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3, BoundingBoxSE3Index
-from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
-from d123.geometry.vector import Vector3D, Vector3DIndex
+from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
TARGET_DT: Final[float] = 0.1
NUPLAN_DT: Final[float] = 0.05
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index 4c251252..5831ea2a 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -5,12 +5,9 @@
import numpy.typing as npt
import shapely.geometry as geom
-from d123.geometry.base import Point3D, StateSE2
-from d123.geometry.line.polylines import Polyline3D, PolylineSE2
-from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D
from d123.geometry.transform.tranform_2d import translate_along_yaw
-from d123.geometry.utils import normalize_angle
-from d123.geometry.vector import Vector2D
+from d123.geometry.utils.rotation_utils import normalize_angle
MAX_LANE_WIDTH = 25.0 # meters
MIN_LANE_WIDTH = 2.0
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
index 20a2be00..6cf4f0f9 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
@@ -11,9 +11,8 @@
from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
-from d123.geometry.base import Point3DIndex
-from d123.geometry.line.polylines import Polyline3D
-from d123.geometry.units import mph_to_mps
+from d123.geometry import Point3DIndex, Polyline3D
+from d123.geometry.utils.units import mph_to_mps
# TODO:
# - Implement stop signs
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 77f19ead..a6371421 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -27,11 +27,9 @@
from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry.base import Point3D, StateSE3
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.geometry.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex
from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
-from d123.geometry.vector import Vector3D, Vector3DIndex
+from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
diff --git a/d123/dataset/maps/abstract_map.py b/d123/dataset/maps/abstract_map.py
index f8c547e7..be9eefeb 100644
--- a/d123/dataset/maps/abstract_map.py
+++ b/d123/dataset/maps/abstract_map.py
@@ -7,7 +7,7 @@
from d123.dataset.maps.abstract_map_objects import AbstractMapObject
from d123.dataset.maps.map_datatypes import MapLayer
-from d123.geometry.base import Point2D
+from d123.geometry import Point2D
# TODO:
# - add docstrings
diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/dataset/maps/abstract_map_objects.py
index a1696009..14e85539 100644
--- a/d123/dataset/maps/abstract_map_objects.py
+++ b/d123/dataset/maps/abstract_map_objects.py
@@ -7,7 +7,7 @@
import trimesh
from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
-from d123.geometry.line.polylines import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry import Polyline2D, Polyline3D, PolylineSE2
class AbstractMapObject(abc.ABC):
diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/dataset/maps/gpkg/gpkg_map.py
index f1fead93..d466857e 100644
--- a/d123/dataset/maps/gpkg/gpkg_map.py
+++ b/d123/dataset/maps/gpkg/gpkg_map.py
@@ -26,7 +26,7 @@
)
from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns
from d123.dataset.maps.map_datatypes import MapLayer
-from d123.geometry.base import Point2D
+from d123.geometry import Point2D
USE_ARROW: bool = True
diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/dataset/maps/gpkg/gpkg_map_objects.py
index 4641cd97..1a274a53 100644
--- a/d123/dataset/maps/gpkg/gpkg_map_objects.py
+++ b/d123/dataset/maps/gpkg/gpkg_map_objects.py
@@ -26,8 +26,7 @@
)
from d123.dataset.maps.gpkg.utils import get_row_with_value
from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType
-from d123.geometry.base import Point3DIndex
-from d123.geometry.line.polylines import Polyline3D
+from d123.geometry import Point3DIndex, Polyline3D
class GPKGSurfaceObject(AbstractSurfaceMapObject):
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index e69de29b..ca210677 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -0,0 +1,18 @@
+from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.geometry_index import (
+ BoundingBoxSE2Index,
+ BoundingBoxSE3Index,
+ Corners2DIndex,
+ Corners3DIndex,
+ Point2DIndex,
+ Point3DIndex,
+ StateSE2Index,
+ StateSE3Index,
+ Vector2DIndex,
+ Vector3DIndex,
+)
+from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.point import Point2D, Point3D
+from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.vector import Vector2D, Vector3D
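These re-exports are also why the autoflake hook above gains "exclude: __init__.py$": nothing in this file uses the imported names, so --remove-all-unused-imports would otherwise strip them all. A minimal consumer-side sketch (hypothetical values; assumes StateSE2 is constructed as (x, y, yaw) per StateSE2Index):

    import numpy as np

    from d123.geometry import Point2D, StateSE2, Vector2D

    # All core primitives are now reachable from the package root.
    point = Point2D(1.0, 2.0)
    state = StateSE2(point.x, point.y, np.pi / 4)  # assumed (x, y, yaw) signature
    shift = Vector2D(0.5, -0.5)                    # Vector2D subclasses Point2D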
diff --git a/d123/geometry/base_index.py b/d123/geometry/base_index.py
deleted file mode 100644
index cb258618..00000000
--- a/d123/geometry/base_index.py
+++ /dev/null
@@ -1 +0,0 @@
-# TODO: Move base index here to avoid circular imports.
diff --git a/d123/geometry/bounding_box/bounding_box.py b/d123/geometry/bounding_box.py
similarity index 92%
rename from d123/geometry/bounding_box/bounding_box.py
rename to d123/geometry/bounding_box.py
index ddf7ec15..b71c4fae 100644
--- a/d123/geometry/bounding_box/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -7,9 +7,9 @@
import numpy.typing as npt
import shapely
-from d123.geometry.base import StateSE2, StateSE3
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, BoundingBoxSE3Index
-from d123.geometry.bounding_box.utils import bbse2_array_to_corners_array
+from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index
+from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array
# TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach?
diff --git a/d123/geometry/bounding_box/bounding_box_index.py b/d123/geometry/geometry_index.py
similarity index 59%
rename from d123/geometry/bounding_box/bounding_box_index.py
rename to d123/geometry/geometry_index.py
index c282b07e..a8652b37 100644
--- a/d123/geometry/bounding_box/bounding_box_index.py
+++ b/d123/geometry/geometry_index.py
@@ -3,6 +3,69 @@
from d123.common.utils.enums import classproperty
+class Point2DIndex(IntEnum):
+ X = 0
+ Y = 1
+
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
+
+class Vector2DIndex(IntEnum):
+ X = 0
+ Y = 1
+
+
+class StateSE2Index(IntEnum):
+ X = 0
+ Y = 1
+ YAW = 2
+
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
+
+class Point3DIndex(IntEnum):
+
+ X = 0
+ Y = 1
+ Z = 2
+
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
+
+class Vector3DIndex(IntEnum):
+ X = 0
+ Y = 1
+ Z = 2
+
+
+class StateSE3Index(IntEnum):
+
+ X = 0
+ Y = 1
+ Z = 2
+ ROLL = 3
+ PITCH = 4
+ YAW = 5
+
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
+ @classproperty
+ def XYZ(cls) -> slice:
+ return slice(cls.X, cls.Z + 1)
+
+ @classproperty
+ def ROTATION_XYZ(cls) -> slice:
+ return slice(cls.ROLL, cls.YAW + 1)
+
+
class BoundingBoxSE2Index(IntEnum):
X = 0
Y = 1
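The slice classproperties are the main addition over plain IntEnum members: they let callers address contiguous sub-blocks of state arrays by name. A minimal sketch of the intended indexing pattern:

    import numpy as np

    from d123.geometry.geometry_index import StateSE3Index

    # A batch of SE3 states laid out as (N, 6) columns per StateSE3Index.
    states = np.zeros((4, len(StateSE3Index)))

    positions = states[:, StateSE3Index.XYZ]           # (4, 3): x, y, z
    rotations = states[:, StateSE3Index.ROTATION_XYZ]  # (4, 3): roll, pitch, yaw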
diff --git a/d123/geometry/line/__init__.py b/d123/geometry/line/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/geometry/point.py b/d123/geometry/point.py
new file mode 100644
index 00000000..c12cc22e
--- /dev/null
+++ b/d123/geometry/point.py
@@ -0,0 +1,98 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Iterable
+
+import numpy as np
+import numpy.typing as npt
+import shapely.geometry as geom
+
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
+
+
+@dataclass
+class Point2D:
+ """Class to represents 2D points."""
+
+ x: float # [m] location
+ y: float # [m] location
+ __slots__ = "x", "y"
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
+ assert array.ndim == 1
+ assert array.shape[0] == len(Point2DIndex)
+ return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """
+ Convert point to array.
+ :return: array containing [x, y]
+ """
+ array = np.zeros(len(Point2DIndex), dtype=np.float64)
+ array[Point2DIndex.X] = self.x
+ array[Point2DIndex.Y] = self.y
+ return array
+
+ @property
+ def shapely_point(self) -> geom.Point:
+ return geom.Point(self.x, self.y)
+
+ def __iter__(self) -> Iterable[float]:
+ """
+ :return: iterator over the coordinates (x, y)
+ """
+ return iter((self.x, self.y))
+
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y))
+
+
+@dataclass
+class Point3D:
+ """Class to represents 2D points."""
+
+ x: float # [m] location
+ y: float # [m] location
+ z: float # [m] location
+ __slots__ = "x", "y", "z"
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D":
+ assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}"
+ assert array.shape[0] == len(
+ Point3DIndex
+ ), f"Array must have the same length as Point3DIndex, got shape {array.shape}"
+ return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """
+ Convert point to array.
+ :return: array containing [x, y, z]
+ """
+ array = np.zeros(len(Point3DIndex), dtype=np.float64)
+ array[Point3DIndex.X] = self.x
+ array[Point3DIndex.Y] = self.y
+ array[Point3DIndex.Z] = self.z
+ return array
+
+ @property
+ def point_2d(self) -> Point2D:
+ return Point2D(self.x, self.y)
+
+ @property
+ def shapely_point(self) -> geom.Point:
+ return geom.Point(self.x, self.y, self.z)
+
+ def __iter__(self) -> Iterable[float]:
+ """
+ :return: iterator over the coordinates (x, y, z)
+ """
+ return iter((self.x, self.y, self.z))
+
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y, self.z))
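A quick sketch of the round-trip behaviour the new module preserves from the old base.py (hypothetical values):

    import numpy as np

    from d123.geometry.point import Point2D, Point3D

    p3 = Point3D(1.0, 2.0, 3.0)
    assert np.allclose(Point3D.from_array(p3.array).array, p3.array)  # array round-trip

    x, y, z = p3                         # __iter__ allows unpacking
    assert p3.point_2d == Point2D(x, y)  # drops the z coordinate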
diff --git a/d123/geometry/line/polylines.py b/d123/geometry/polyline.py
similarity index 95%
rename from d123/geometry/line/polylines.py
rename to d123/geometry/polyline.py
index 78a417cd..365b458e 100644
--- a/d123/geometry/line/polylines.py
+++ b/d123/geometry/polyline.py
@@ -9,10 +9,11 @@
import shapely.geometry as geom
from scipy.interpolate import interp1d
-from d123.geometry.base import Point2D, Point2DIndex, Point3D, Point3DIndex, StateSE2, StateSE2Index
-from d123.geometry.constants import DEFAULT_Z
-from d123.geometry.line.helper import get_linestring_yaws, get_path_progress
-from d123.geometry.utils import normalize_angle
+from d123.geometry.point import Point2D, Point2DIndex, Point3D, Point3DIndex
+from d123.geometry.se import StateSE2, StateSE2Index
+from d123.geometry.utils.constants import DEFAULT_Z
+from d123.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress
+from d123.geometry.utils.rotation_utils import normalize_angle
# TODO: Implement PolylineSE3
# TODO: Benchmark interpolation performance and reconsider reliance on LineString
diff --git a/d123/geometry/base.py b/d123/geometry/se.py
similarity index 53%
rename from d123/geometry/base.py
rename to d123/geometry/se.py
index b531f499..8abd908a 100644
--- a/d123/geometry/base.py
+++ b/d123/geometry/se.py
@@ -1,76 +1,14 @@
from __future__ import annotations
from dataclasses import dataclass
-from enum import IntEnum
from typing import Iterable
import numpy as np
import numpy.typing as npt
import shapely.geometry as geom
-# from d123.geometry.transform.se3 import get_rotation_matrix
-from d123.common.utils.enums import classproperty
-
-# TODO: Reconsider if 2D/3D or SE2/SE3 structure would be better hierarchical, e.g. inheritance or composition.
-
-
-class Point2DIndex(IntEnum):
- X = 0
- Y = 1
-
- @classproperty
- def XY(cls) -> slice:
- return slice(cls.X, cls.Y + 1)
-
-
-@dataclass
-class Point2D:
- """Class to represents 2D points."""
-
- x: float # [m] location
- y: float # [m] location
- __slots__ = "x", "y"
-
- @classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
- assert array.ndim == 1
- assert array.shape[0] == len(Point2DIndex)
- return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
-
- @property
- def array(self) -> npt.NDArray[np.float64]:
- """
- Convert vector to array
- :return: array containing [x, y]
- """
- array = np.zeros(len(Point2DIndex), dtype=np.float64)
- array[Point2DIndex.X] = self.x
- array[Point2DIndex.Y] = self.y
- return array
-
- @property
- def shapely_point(self) -> geom.Point:
- return geom.Point(self.x, self.y)
-
- def __iter__(self) -> Iterable[float]:
- """
- :return: iterator of tuples (x, y)
- """
- return iter((self.x, self.y))
-
- def __hash__(self) -> int:
- """Hash method"""
- return hash((self.x, self.y))
-
-
-class StateSE2Index(IntEnum):
- X = 0
- Y = 1
- YAW = 2
-
- @classproperty
- def XY(cls) -> slice:
- return slice(cls.X, cls.Y + 1)
+from d123.geometry.geometry_index import StateSE2Index, StateSE3Index
+from d123.geometry.point import Point2D, Point3D
@dataclass
@@ -123,87 +61,6 @@ def __hash__(self) -> int:
return hash((self.x, self.y))
-class Point3DIndex(IntEnum):
-
- X = 0
- Y = 1
- Z = 2
-
- @classproperty
- def XY(cls) -> slice:
- return slice(cls.X, cls.Y + 1)
-
-
-@dataclass
-class Point3D:
- """Class to represents 2D points."""
-
- x: float # [m] location
- y: float # [m] location
- z: float # [m] location
- __slots__ = "x", "y", "z"
-
- @classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D":
- assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}"
- assert array.shape[0] == len(
- Point3DIndex
- ), f"Array must have the same length as Point3DIndex, got shape {array.shape}"
- return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
-
- @property
- def array(self) -> npt.NDArray[np.float64]:
- """
- Convert vector to array
- :return: array containing [x, y]
- """
- array = np.zeros(len(Point3DIndex), dtype=np.float64)
- array[Point3DIndex.X] = self.x
- array[Point3DIndex.Y] = self.y
- array[Point3DIndex.Z] = self.z
- return array
-
- @property
- def point_2d(self) -> Point2D:
- return Point2D(self.x, self.y)
-
- @property
- def shapely_point(self) -> geom.Point:
- return geom.Point(self.x, self.y, self.z)
-
- def __iter__(self) -> Iterable[float]:
- """
- :return: iterator of tuples (x, y)
- """
- return iter((self.x, self.y, self.z))
-
- def __hash__(self) -> int:
- """Hash method"""
- return hash((self.x, self.y, self.z))
-
-
-class StateSE3Index(IntEnum):
-
- X = 0
- Y = 1
- Z = 2
- ROLL = 3
- PITCH = 4
- YAW = 5
-
- @classproperty
- def XY(cls) -> slice:
- return slice(cls.X, cls.Y + 1)
-
- @classproperty
- def XYZ(cls) -> slice:
- return slice(cls.X, cls.Z + 1)
-
- @classproperty
- def ROTATION_XYZ(cls) -> slice:
- return slice(cls.ROLL, cls.YAW + 1)
-
-
@dataclass
class StateSE3:
"""Class to represents 2D points."""
diff --git a/d123/geometry/transform/se2_array.py b/d123/geometry/transform/se2_array.py
index 5112ea34..97ff8bee 100644
--- a/d123/geometry/transform/se2_array.py
+++ b/d123/geometry/transform/se2_array.py
@@ -3,8 +3,8 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.base import StateSE2, StateSE2Index
-from d123.geometry.line.polylines import normalize_angle
+from d123.geometry.se import StateSE2, StateSE2Index
+from d123.geometry.utils.rotation_utils import normalize_angle
# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py
index 42fae1c2..6b4219d0 100644
--- a/d123/geometry/transform/se3.py
+++ b/d123/geometry/transform/se3.py
@@ -1,8 +1,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-from d123.geometry.vector import Vector3D
+from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D
# def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
# R_x = np.array(
diff --git a/d123/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py
index 18998709..b85e598b 100644
--- a/d123/geometry/transform/tranform_2d.py
+++ b/d123/geometry/transform/tranform_2d.py
@@ -1,7 +1,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.base import StateSE2
+from d123.geometry.se import StateSE2
from d123.geometry.vector import Vector2D
# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
diff --git a/d123/geometry/bounding_box/__init__.py b/d123/geometry/utils/__init__.py
similarity index 100%
rename from d123/geometry/bounding_box/__init__.py
rename to d123/geometry/utils/__init__.py
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
new file mode 100644
index 00000000..b1ce46b2
--- /dev/null
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -0,0 +1,63 @@
+import numpy as np
+import numpy.typing as npt
+import shapely
+
+from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex
+
+
+def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ """
+ Converts an array of bounding boxes in BoundingBoxSE2 encoding to a corner-coordinate array.
+ :param bbse2: Array of shape (..., len(BoundingBoxSE2Index)) of SE2 bounding boxes.
+ :return: Corner coordinates of shape (..., len(Corners2DIndex), len(Point2DIndex)).
+ """
+ assert bbse2.shape[-1] == len(BoundingBoxSE2Index)
+
+ ndim_one: bool = bbse2.ndim == 1
+ if ndim_one:
+ bbse2 = bbse2[None, :]
+
+ corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64)
+
+ centers = bbse2[..., BoundingBoxSE2Index.XY]
+ yaws = bbse2[..., BoundingBoxSE2Index.YAW]
+ half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0
+ half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0
+
+ corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width)
+ corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array(
+ centers, yaws, half_length, -half_width
+ )
+ corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array(
+ centers, yaws, -half_length, -half_width
+ )
+ corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width)
+
+ return corners_array.squeeze(axis=0) if ndim_one else corners_array
+
+
+def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+ polygons = shapely.creation.polygons(corners_array)
+ return polygons
+
+
+def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+ return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
+
+
+def translate_along_yaw_array(
+ points_2d: npt.NDArray[np.float64],
+ headings: npt.NDArray[np.float64],
+ lon: npt.NDArray[np.float64],
+ lat: npt.NDArray[np.float64],
+) -> npt.NDArray[np.float64]:
+ assert points_2d.shape[-1] == len(Point2DIndex)
+ half_pi = np.pi / 2.0
+ translation: npt.NDArray[np.float64] = np.stack(
+ [
+ (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)),
+ (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)),
+ ],
+ axis=-1,
+ )
+ return points_2d + translation
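A minimal usage sketch for the relocated helpers (hypothetical box; only the index names shown in this patch are assumed):

    import numpy as np

    from d123.geometry.geometry_index import BoundingBoxSE2Index
    from d123.geometry.utils.bounding_box_utils import (
        bbse2_array_to_corners_array,
        bbse2_array_to_polygon_array,
    )

    # One 4 m x 2 m box at the origin with zero yaw.
    bbse2 = np.zeros(len(BoundingBoxSE2Index))
    bbse2[BoundingBoxSE2Index.LENGTH] = 4.0
    bbse2[BoundingBoxSE2Index.WIDTH] = 2.0

    corners = bbse2_array_to_corners_array(bbse2)  # (len(Corners2DIndex), 2)
    polygon = bbse2_array_to_polygon_array(bbse2)  # shapely polygon of the same box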
diff --git a/d123/geometry/constants.py b/d123/geometry/utils/constants.py
similarity index 100%
rename from d123/geometry/constants.py
rename to d123/geometry/utils/constants.py
diff --git a/d123/geometry/line/helper.py b/d123/geometry/utils/polyline_utils.py
similarity index 96%
rename from d123/geometry/line/helper.py
rename to d123/geometry/utils/polyline_utils.py
index c3fc0f1a..5e82ea9f 100644
--- a/d123/geometry/line/helper.py
+++ b/d123/geometry/utils/polyline_utils.py
@@ -2,7 +2,7 @@
import numpy.typing as npt
from shapely.geometry import LineString
-from d123.geometry.base import Point2DIndex, StateSE2Index
+from d123.geometry.geometry_index import Point2DIndex, StateSE2Index
def get_linestring_yaws(linestring: LineString) -> npt.NDArray[np.float64]:
diff --git a/d123/geometry/utils.py b/d123/geometry/utils/rotation_utils.py
similarity index 100%
rename from d123/geometry/utils.py
rename to d123/geometry/utils/rotation_utils.py
diff --git a/d123/geometry/units.py b/d123/geometry/utils/units.py
similarity index 100%
rename from d123/geometry/units.py
rename to d123/geometry/utils/units.py
diff --git a/d123/geometry/bounding_box/utils.py b/d123/geometry/utils/utils.py
similarity index 94%
rename from d123/geometry/bounding_box/utils.py
rename to d123/geometry/utils/utils.py
index 5da3530f..b1ce46b2 100644
--- a/d123/geometry/bounding_box/utils.py
+++ b/d123/geometry/utils/utils.py
@@ -2,8 +2,7 @@
import numpy.typing as npt
import shapely
-from d123.geometry.base import Point2DIndex
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index, Corners2DIndex
+from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex
def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py
index dad25329..a3b8631d 100644
--- a/d123/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -1,16 +1,9 @@
from __future__ import annotations
-from enum import IntEnum
-
import numpy as np
import numpy.typing as npt
-from d123.geometry.base import Point2D, Point3D, Point3DIndex
-
-
-class Vector2DIndex(IntEnum):
- X = 0
- Y = 1
+from d123.geometry.point import Point2D, Point3D, Point3DIndex
class Vector2D(Point2D):
@@ -35,12 +28,6 @@ def vector_2d(self) -> Vector2D:
return self
-class Vector3DIndex(IntEnum):
- X = 0
- Y = 1
- Z = 2
-
-
class Vector3D(Point3D):
@classmethod
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index b9a4b587..7d1a58d0 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -5,8 +5,8 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import Point2D
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.bounding_box import BoundingBoxSE2
+from d123.geometry.point import Point2D
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 118355a1..072773e1 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -10,9 +10,10 @@
from d123.dataset.arrow.conversion import BoxDetectionWrapper
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import Point2D, StateSE2
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.bounding_box import BoundingBoxSE2
+from d123.geometry.point import Point2D
+from d123.geometry.polyline import PolylineSE2
+from d123.geometry.se import StateSE2
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.geometry.vector import Vector2D
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index 357aac41..e4d740c3 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -5,9 +5,10 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import Point2D, StateSE2
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry.bounding_box import BoundingBoxSE2
+from d123.geometry.point import Point2D
+from d123.geometry.polyline import PolylineSE2
+from d123.geometry.se import StateSE2
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.simulation.agents.abstract_agents import AbstractAgents
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 2b20a496..3af49624 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -9,10 +9,10 @@
from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import StateSE2
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
+from d123.geometry.bounding_box import BoundingBoxSE2
+from d123.geometry.se import StateSE2
from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
-from d123.geometry.utils import normalize_angle
+from d123.geometry.utils.rotation_utils import normalize_angle
from d123.simulation.agents.abstract_agents import AbstractAgents
from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder
from d123.training.models.sim_agent.smart.datamodules.target_builder import _numpy_dict_to_torch
diff --git a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
index c1f343da..6ef5893c 100644
--- a/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
+++ b/d123/simulation/controller/motion_model/kinematic_bicycle_model.py
@@ -3,7 +3,7 @@
from d123.common.datatypes.time.time_point import TimeDuration, TimePoint
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.geometry.base import StateSE2
+from d123.geometry.se import StateSE2
from d123.geometry.vector import Vector2D
from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel
diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
index 604626c9..38d661ac 100644
--- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
+++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
@@ -12,7 +12,7 @@
from shapely.affinity import scale as shapely_scale
from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus
-from d123.geometry.base import StateSE2
+from d123.geometry.se import StateSE2
from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.geometry.vector import Vector2D
diff --git a/d123/simulation/gym/environment/helper/environment_area.py b/d123/simulation/gym/environment/helper/environment_area.py
index db247458..90f5a0c9 100644
--- a/d123/simulation/gym/environment/helper/environment_area.py
+++ b/d123/simulation/gym/environment/helper/environment_area.py
@@ -3,7 +3,7 @@
from shapely import Polygon
-from d123.geometry.base import StateSE2
+from d123.geometry.se import StateSE2
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.geometry.vector import Vector2D
diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py
index 70a3dcdf..4ab65f6a 100644
--- a/d123/simulation/gym/environment/helper/environment_cache.py
+++ b/d123/simulation/gym/environment/helper/environment_cache.py
@@ -24,8 +24,8 @@
AbstractStopLine,
)
from d123.dataset.maps.map_datatypes import MapLayer
-from d123.geometry.base import StateSE2
from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.se import StateSE2
from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea
from d123.simulation.planning.abstract_planner import PlannerInitialization, PlannerInput
diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py
index 05bae92e..7cbd9b92 100644
--- a/d123/simulation/metrics/sim_agents/interaction_based.py
+++ b/d123/simulation/metrics/sim_agents/interaction_based.py
@@ -4,8 +4,8 @@
import numpy.typing as npt
from d123.dataset.arrow.conversion import BoxDetectionWrapper
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.geometry.bounding_box.utils import bbse2_array_to_polygon_array
+from d123.geometry.geometry_index import BoundingBoxSE2Index
+from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array
MAX_OBJECT_DISTANCE: Final[float] = 50.0
diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py
index 5342be6e..134a3c5e 100644
--- a/d123/simulation/metrics/sim_agents/map_based.py
+++ b/d123/simulation/metrics/sim_agents/map_based.py
@@ -7,10 +7,10 @@
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.maps.abstract_map_objects import AbstractLane
from d123.dataset.maps.map_datatypes import MapLayer
-from d123.geometry.base import StateSE2, StateSE2Index
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
-from d123.geometry.bounding_box.utils import Corners2DIndex, bbse2_array_to_corners_array
-from d123.geometry.utils import normalize_angle
+from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index
+from d123.geometry.se import StateSE2
+from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array
+from d123.geometry.utils.rotation_utils import normalize_angle
MAX_LANE_CENTER_DISTANCE: Final[float] = 10.0
diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py
index 7f9f372e..36033e70 100644
--- a/d123/simulation/metrics/sim_agents/sim_agents.py
+++ b/d123/simulation/metrics/sim_agents/sim_agents.py
@@ -7,7 +7,7 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType
from d123.dataset.maps.abstract_map import AbstractMap
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.geometry_index import BoundingBoxSE2Index
from d123.simulation.metrics.sim_agents.histogram_metric import (
BinaryHistogramIntersectionMetric,
HistogramIntersectionMetric,
diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py
index 2923038a..c9d2bb3b 100644
--- a/d123/simulation/metrics/sim_agents/utils.py
+++ b/d123/simulation/metrics/sim_agents/utils.py
@@ -5,7 +5,7 @@
from d123.common.datatypes.detection.detection import BoxDetectionWrapper
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.bounding_box.bounding_box_index import BoundingBoxSE2Index
+from d123.geometry.geometry_index import BoundingBoxSE2Index
def _get_log_agents_array(
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 806350bf..4286b990 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -16,9 +16,8 @@
AbstractLaneGroup,
)
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry.base import StateSE2, StateSE2Index
-from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.geometry.line.polylines import PolylineSE2
+from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2
+from d123.geometry.geometry_index import StateSE2Index
from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array
# TODO: Hind feature builder behind abstraction.
diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb
index 85226114..434287f0 100644
--- a/notebooks/av2/delete_me.ipynb
+++ b/notebooks/av2/delete_me.ipynb
@@ -386,7 +386,7 @@
"from pyquaternion import Quaternion\n",
"from d123.common.datatypes.detection.detection_types import DetectionType\n",
"from d123.geometry.base import StateSE2\n",
- "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "from d123.geometry.bounding_box import BoundingBoxSE2\n",
"from d123.common.visualization.color.config import PlotConfig\n",
"from d123.common.visualization.color.default import BOX_DETECTION_CONFIG\n",
"from d123.common.visualization.matplotlib.utils import add_shapely_polygon_to_ax\n",
diff --git a/notebooks/deprecated/test_scene_builder.ipynb b/notebooks/deprecated/test_scene_builder.ipynb
index 607cd184..e41ba4dd 100644
--- a/notebooks/deprecated/test_scene_builder.ipynb
+++ b/notebooks/deprecated/test_scene_builder.ipynb
@@ -72,8 +72,8 @@
"# import matplotlib.pyplot as plt\n",
"# from tqdm import tqdm\n",
"\n",
- "# from d123.geometry.base import Point2D, StateSE2\n",
- "# from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "# from d123.geometry import Point2D, StateSE2\n",
+ "# from d123.geometry.bounding_box import BoundingBoxSE2\n",
"# from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
"# from d123.common.visualization.matplotlib.observation import (\n",
"# add_bounding_box_to_ax,\n",
diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb
index 3fbb3a94..c49f505c 100644
--- a/notebooks/gym/test_gym.ipynb
+++ b/notebooks/gym/test_gym.ipynb
@@ -78,7 +78,7 @@
"\n",
"from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2\n",
"from d123.geometry.base import Point2D, StateSE2\n",
- "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE2\n",
+ "from d123.geometry.bounding_box import BoundingBoxSE2\n",
"from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG\n",
"from d123.common.visualization.matplotlib.observation import (\n",
" add_bounding_box_to_ax,\n",
diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb
index 0a50c5ed..4436e7ea 100644
--- a/notebooks/scene_rendering.ipynb
+++ b/notebooks/scene_rendering.ipynb
@@ -63,7 +63,7 @@
"from typing import Tuple\n",
"from d123.common.datatypes.detection.detection import BoxDetection\n",
"from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n",
- "from d123.geometry.base import StateSE2\n",
+ "from d123.geometry import StateSE2\n",
"from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
"from d123.geometry.vector import Vector2D\n",
"from d123.common.visualization.matplotlib.observation import (\n",
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 3755c860..f8fe8c69 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -63,7 +63,7 @@
"from typing import List, Optional, Tuple\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
- "from d123.geometry.base import Point2D\n",
+ "from d123.geometry import Point2D\n",
"from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
"from d123.common.visualization.color.config import PlotConfig\n",
"from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
@@ -236,17 +236,17 @@
" return fig, ax\n",
"\n",
"\n",
- "scene_index = 9\n",
+ "scene_index = 1\n",
"iteration = 99\n",
"fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
"plt.show()\n",
"\n",
- "camera = scenes[scene_index].get_camera_at_iteration(\n",
- " iteration=iteration, camera_type=CameraType.CAM_F0\n",
- ")\n",
+ "# camera = scenes[scene_index].get_camera_at_iteration(\n",
+ "# iteration=iteration, camera_type=CameraType.CAM_F0\n",
+ "# )\n",
"\n",
- "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
- "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n",
+ "# plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
+ "# # # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n",
"\n",
"# scenes[scene_index].log_name"
]
diff --git a/notebooks/waymo_perception/lidar_testing.ipynb b/notebooks/waymo_perception/lidar_testing.ipynb
index dfa0e65e..fa6a92a6 100644
--- a/notebooks/waymo_perception/lidar_testing.ipynb
+++ b/notebooks/waymo_perception/lidar_testing.ipynb
@@ -54,8 +54,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.geometry.base import StateSE3\n",
- "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry import StateSE3\n",
+ "from d123.geometry.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb
index a06d8f7c..d7694c2d 100644
--- a/notebooks/waymo_perception/map_testing.ipynb
+++ b/notebooks/waymo_perception/map_testing.ipynb
@@ -54,8 +54,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.geometry.base import StateSE3\n",
- "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry import StateSE3\n",
+ "from d123.geometry.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
@@ -139,7 +139,7 @@
"source": [
"from collections import defaultdict\n",
"\n",
- "from d123.geometry.units import mph_to_mps\n",
+ "from d123.geometry.utils.units import mph_to_mps\n",
"\n",
"\n",
"dataset = tf.data.TFRecordDataset(pathname, compression_type=\"\")\n",
@@ -382,7 +382,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.geometry.line.polylines import Polyline3D\n",
+ "from d123.geometry.polyline import Polyline3D\n",
"import numpy as np\n",
"\n",
"\n",
@@ -627,78 +627,6 @@
"ax.set_aspect(\"equal\")\n",
"# lanes"
]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "15",
- "metadata": {},
- "outputs": [],
- "source": [
- "from d123.geometry.base import StateSE2\n",
- "# from d123.geometry.line.polylines import PolylineSE2\n",
- "# from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
- "# from d123.geometry.vector import Vector2D\n",
- "\n",
- "# size = 30\n",
- "# fig, ax = plt.subplots(figsize=(size, size))\n",
- "\n",
- "# lane_id = 114\n",
- "# lane_ = lanes[lane_id]\n",
- "\n",
- "# lane_polyline_se2 = PolylineSE2.from_array(lane_[:, :2])\n",
- "# BOUNDARY_STEP_SIZE = 0.5\n",
- "\n",
- "\n",
- "# distances_se2 = np.linspace(\n",
- "# 0, lane_polyline_se2.length, int(lane_polyline_se2.length / BOUNDARY_STEP_SIZE) + 1, endpoint=True\n",
- "# )\n",
- "# lane_query_se2 = lane_polyline_se2.interpolate(distances_se2)\n",
- "\n",
- "\n",
- "# left_boundary = left_boundaries[lane_id].array\n",
- "# right_boundary = right_boundaries[lane_id].array\n",
- "\n",
- "# assert len(left_boundary) > 0 and len(right_boundary) > 0\n",
- "# ax.plot(left_boundary[:, 0], left_boundary[:, 1], color=\"lime\")\n",
- "# ax.plot(right_boundary[:, 0], right_boundary[:, 1], color=\"red\")\n",
- "\n",
- "# ax.scatter(lane_query_se2[:, 0], lane_query_se2[:, 1], c=lane_query_se2[:, 2] / np.pi, cmap=\"viridis\")\n",
- "\n",
- "\n",
- "# MAX_LANE_WIDTH = 25\n",
- "# for state_se2_array in lane_query_se2:\n",
- "# for sign in [1.0, -1.0]:\n",
- "# perp_start_point = translate_along_yaw(StateSE2.from_array(state_se2_array), Vector2D(0.0, sign * 0.1))\n",
- "# perp_end_point = translate_along_yaw(\n",
- "# StateSE2.from_array(state_se2_array), Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0)\n",
- "# )\n",
- "# ax.plot(\n",
- "# [perp_start_point.x, perp_end_point.x],\n",
- "# [perp_start_point.y, perp_end_point.y],\n",
- "# color=\"lime\" if sign > 0 else \"red\",\n",
- "# linestyle=\"--\",\n",
- "# alpha=0.5,\n",
- "# )\n",
- "\n",
- "\n",
- "# for road_edge in road_edges.values():\n",
- "# # print(len(driveway))\n",
- "# ax.plot(road_edge[:, 0], road_edge[:, 1], color=\"black\", label=\"road_edge\", linestyle=\"dashdot\")\n",
- "\n",
- "\n",
- "\n",
- "# for road_line in road_lines.values():\n",
- "# # print(len(driveway))\n",
- "# ax.plot(road_line[:, 0], road_line[:, 1], color=\"orange\", label=\"road_line\")\n",
- "\n",
- "# ax.set_aspect(\"equal\")\n",
- "\n",
- "# ax.set_xlim(lane_[:, 0].min() - 10, lane_[:, 0].max() + 10)\n",
- "# ax.set_ylim(lane_[:, 1].min() - 10, lane_[:, 1].max() + 10)\n",
- "\n",
- "# lanes_map_features[lane_id]"
- ]
}
],
"metadata": {
diff --git a/notebooks/waymo_perception/testing.ipynb b/notebooks/waymo_perception/testing.ipynb
index 9adc8510..d03d7d93 100644
--- a/notebooks/waymo_perception/testing.ipynb
+++ b/notebooks/waymo_perception/testing.ipynb
@@ -51,8 +51,8 @@
"import io\n",
"from pyquaternion import Quaternion\n",
"\n",
- "from d123.geometry.base import StateSE3\n",
- "from d123.geometry.bounding_box.bounding_box import BoundingBoxSE3\n",
+ "from d123.geometry import StateSE3\n",
+ "from d123.geometry.bounding_box import BoundingBoxSE3\n",
"\n",
"from waymo_open_dataset.utils import frame_utils\n",
"\n",
From 62654f3f13146a5f299aedf25e623c97caab9907 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Mon, 25 Aug 2025 14:31:07 +0800
Subject: [PATCH 014/145] fix ego_yaw_pitch_roll and get good results in lidar
viser
---
.../kitti_360/kitti_360_data_converter.py | 16 +-
.../kitti_360/kitti_360_helper.py | 14 +-
jbwang_test2.py | 216 ++++++++++++------
3 files changed, 158 insertions(+), 88 deletions(-)
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 77f3fff0..6433ca89 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -16,6 +16,7 @@
import pyarrow as pa
from PIL import Image
import logging
+from pyquaternion import Quaternion
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
@@ -455,15 +456,24 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
oxts_data = np.loadtxt(oxts_path_file)
- #TODO check roll, pitch, yaw again
- roll, pitch, yaw = oxts_data[3:6]
vehicle_parameters = get_kitti360_station_wagon_parameters()
while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
pose_idx += 1
pos = pose_idx
- # pos = np.searchsorted(pwwwoses_time, idx, side='right') - 1
+ # pos = np.searchsorted(poses_time, idx, side='right') - 1
+ # NOTE you can use oxts_data[3:6] as roll, pitch, yaw for simplicity
+ #roll, pitch, yaw = oxts_data[3:6]
+ r00, r01, r02 = poses[pos, 1:4]
+ r10, r11, r12 = poses[pos, 5:8]
+ r20, r21, r22 = poses[pos, 9:12]
+ R_mat = np.array([[r00, r01, r02],
+ [r10, r11, r12],
+ [r20, r21, r22]], dtype=np.float64)
+ R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3]
+ yaw, pitch, roll = Quaternion(matrix=R_mat_cali[:3, :3]).yaw_pitch_roll
+
rear_axle_pose = StateSE3(
x=poses[pos, 4],
y=poses[pos, 8],
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index 5c69264f..7edcd6af 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -13,17 +13,6 @@
DEFAULT_ROLL = 0.0
DEFAULT_PITCH = 0.0
-addtional_calibration = get_rotation_matrix(
- StateSE3(
- x=0.0,
- y=0.0,
- z=0.0,
- roll=np.deg2rad(1.0),
- pitch=np.deg2rad(1.0),
- yaw=np.deg2rad(0.0),
- )
- )
-
kitti3602nuplan_imu_calibration_ideal = np.array([
[1, 0, 0, 0],
[0, -1, 0, 0],
@@ -31,8 +20,7 @@
[0, 0, 0, 1],
], dtype=np.float64)
-KITTI3602NUPLAN_IMU_CALIBRATION = np.eye(4, dtype=np.float64)
-KITTI3602NUPLAN_IMU_CALIBRATION[:3, :3] = addtional_calibration @ kitti3602nuplan_imu_calibration_ideal[:3, :3]
+KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal
MAX_N = 1000
def local2global(semanticId, instanceId):
diff --git a/jbwang_test2.py b/jbwang_test2.py
index 7128a636..f9748db5 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -76,77 +76,149 @@
# print(a[10000:10010,:3])
-import gc
-import json
-import os
-from dataclasses import asdict
-from functools import partial
-from pathlib import Path
-from typing import Any, Dict, Final, List, Optional, Tuple, Union
-import numpy as np
-from collections import defaultdict
-import datetime
-import hashlib
-import xml.etree.ElementTree as ET
-import pyarrow as pa
-from PIL import Image
-import logging
-
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
-
-
-#TODO train and train_full
-bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
-
-tree = ET.parse(bbox_3d_path)
-root = tree.getroot()
-
-KIITI360_DETECTION_NAME_DICT = {
- "truck": DetectionType.VEHICLE,
- "bus": DetectionType.VEHICLE,
- "car": DetectionType.VEHICLE,
- "motorcycle": DetectionType.BICYCLE,
- "bicycle": DetectionType.BICYCLE,
- "pedestrian": DetectionType.PEDESTRIAN,
-}
-# x,y,z = 881.2268115,3247.493293,115.239219
-# x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
-# x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
-x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
-CENTER_REF = np.array([x, y, z], dtype=np.float64)
-objs_name = []
-lable_name = []
-for child in root:
- label = child.find('label').text
- # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
- # continue
+
+
+# import gc
+# import json
+# import os
+# from dataclasses import asdict
+# from functools import partial
+# from pathlib import Path
+# from typing import Any, Dict, Final, List, Optional, Tuple, Union
+
+# import numpy as np
+# from collections import defaultdict
+# import datetime
+# import hashlib
+# import xml.etree.ElementTree as ET
+# import pyarrow as pa
+# from PIL import Image
+# import logging
+
+# from d123.common.datatypes.detection.detection_types import DetectionType
+# from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
+
+
+# #TODO train and train_full
+# bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
+
+# tree = ET.parse(bbox_3d_path)
+# root = tree.getroot()
+
+# KIITI360_DETECTION_NAME_DICT = {
+# "truck": DetectionType.VEHICLE,
+# "bus": DetectionType.VEHICLE,
+# "car": DetectionType.VEHICLE,
+# "motorcycle": DetectionType.BICYCLE,
+# "bicycle": DetectionType.BICYCLE,
+# "pedestrian": DetectionType.PEDESTRIAN,
+# }
+# # x,y,z = 881.2268115,3247.493293,115.239219
+# # x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
+# # x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
+# x,y,z = 874.233508, 3231.56235, 115.185341 # the car we are looking for
+# CENTER_REF = np.array([x, y, z], dtype=np.float64)
+# objs_name = []
+# lable_name = []
+# for child in root:
+# label = child.find('label').text
+# # if child.find('transform') is None or label not in KIITI360_DETECTION_NAME_DICT.keys():
+# # continue
- if child.find('transform') is None:
- continue
- print("this label is ",label)
- print("!!!!!!!!!!!!!!!!!!!")
- obj = KITTI360Bbox3D()
- obj.parseBbox(child)
- # obj.parseVertices(child)
- name = child.find('label').text
- lable_name.append(name)
- # if obj.start_frame < 10030 and obj.end_frame > 10030:
- center = np.array(obj.T, dtype=np.float64)
- dist = np.linalg.norm(center - CENTER_REF)
- if dist < 7:
- print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
- objs_name.append(obj.name)
-print(len(objs_name))
-print(set(objs_name))
-print(set(lable_name))
- # print(obj.Rm)
- # print(Sigma)
-names = []
-for child in root:
- label = child.find('label').text
- if child.find('transform') is None:
- continue
- names.append(label)
-print(set(names))
\ No newline at end of file
+# if child.find('transform') is None:
+# continue
+# print("this label is ",label)
+# print("!!!!!!!!!!!!!!!!!!!")
+# obj = KITTI360Bbox3D()
+# obj.parseBbox(child)
+# # obj.parseVertices(child)
+# name = child.find('label').text
+# lable_name.append(name)
+# # if obj.start_frame < 10030 and obj.end_frame > 10030:
+# center = np.array(obj.T, dtype=np.float64)
+# dist = np.linalg.norm(center - CENTER_REF)
+# if dist < 7:
+# print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
+# objs_name.append(obj.name)
+# print(len(objs_name))
+# print(set(objs_name))
+# print(set(lable_name))
+# # print(obj.Rm)
+# # print(Sigma)
+# names = []
+# for child in root:
+# label = child.find('label').text
+# if child.find('transform') is None:
+# continue
+# names.append(label)
+# print(set(names))
+
+from scipy.spatial.transform import Rotation as R
+import numpy as np
+from pathlib import Path as PATH
+
+def get_rotation_matrix(roll,pitch,yaw):
+    # Composes R = R_x(roll) @ R_y(pitch) @ R_z(yaw), i.e. intrinsic rotations about X, then Y', then Z''.
+ R_x = np.array(
+ [
+ [1, 0, 0],
+ [0, np.cos(roll), -np.sin(roll)],
+ [0, np.sin(roll), np.cos(roll)],
+ ],
+ dtype=np.float64,
+ )
+ R_y = np.array(
+ [
+ [np.cos(pitch), 0, np.sin(pitch)],
+ [0, 1, 0],
+ [-np.sin(pitch), 0, np.cos(pitch)],
+ ],
+ dtype=np.float64,
+ )
+ R_z = np.array(
+ [
+ [np.cos(yaw), -np.sin(yaw), 0],
+ [np.sin(yaw), np.cos(yaw), 0],
+ [0, 0, 1],
+ ],
+ dtype=np.float64,
+ )
+ return R_x @ R_y @ R_z
+
+oxts_path = PATH("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
+pose_file = PATH("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0000_sync/poses.txt")
+poses = np.loadtxt(pose_file)
+poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
+
+pose_idx = 0
+poses_time_len = len(poses_time)
+
+from pyquaternion import Quaternion
+
+for idx in range(len(list(oxts_path.glob("*.txt")))):
+ oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
+ oxts_data = np.loadtxt(oxts_path_file)
+ while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
+ pose_idx += 1
+ pos = pose_idx
+
+ r00, r01, r02 = poses[pos, 1:4]
+ r10, r11, r12 = poses[pos, 5:8]
+ r20, r21, r22 = poses[pos, 9:12]
+ R_mat = np.array([[r00, r01, r02],
+ [r10, r11, r12],
+ [r20, r21, r22]], dtype=np.float64)
+ calib = np.array([[1.0, 0.0, 0.0],
+ [0.0, -1.0, 0.0],
+ [0.0, 0.0, -1.0]], dtype=np.float64)
+ R_mat = R_mat @ calib
+ if idx <= 300:
+ # print("R_mat",R_mat)
+ new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll
+ # new_yaw,new_pitch,new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False)
+ print("new",new_roll,new_pitch,new_yaw)
+ print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度
+ roll, pitch, yaw = oxts_data[3:6]
+ # print("true",get_rotation_matrix(roll,pitch,yaw))
+ # print("new",roll,pitch,yaw)
\ No newline at end of file
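
For reference, the orientation recovery above can be checked in isolation. A minimal sketch, assuming the standard KITTI-360 poses.txt layout (frame index followed by a flattened 3x4 [R|t] matrix, row-major); the path is a placeholder:

    import numpy as np
    from pyquaternion import Quaternion

    pose_row = np.loadtxt("poses.txt")[0]        # placeholder path, first pose row
    R_imu = pose_row[1:13].reshape(3, 4)[:, :3]  # rotation block of the 3x4 pose

    # Axis flip from the KITTI-360 IMU frame to the nuPlan convention, as above.
    calib = np.diag([1.0, -1.0, -1.0])
    yaw, pitch, roll = Quaternion(matrix=R_imu @ calib).yaw_pitch_roll
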
From c3f4053b0f750d5eda690d6c53c85d89ff623ee9 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 10:39:38 +0200
Subject: [PATCH 015/145] Refactor `geometry` functionality and add to docs.
---
d123/common/utils/mixin.py | 49 ++++
d123/geometry/bounding_box.py | 164 ++++++++++--
d123/geometry/geometry_index.py | 54 ++++
d123/geometry/occupancy_map.py | 93 ++++---
d123/geometry/point.py | 64 +++--
d123/geometry/polyline.py | 233 +++++++++++++++---
d123/geometry/se.py | 163 +++++++++---
.../av2 => geometry/torch}/.gitkeep | 0
d123/geometry/transform/se3.py | 27 --
d123/geometry/utils/bounding_box_utils.py | 68 ++++-
d123/geometry/vector.py | 194 +++++++++++++--
.../agents/constant_velocity_agents.py | 2 +-
d123/simulation/agents/idm_agents.py | 6 +-
d123/simulation/agents/path_following.py | 2 +-
.../gym_observation/raster/raster_renderer.py | 10 +-
.../components/time_to_collision.py | 6 +-
.../reward_builder/default_reward_builder.py | 2 +-
docs/conf.py | 4 +-
docs/geometry.rst | 54 +++-
notebooks/viz/bev_matplotlib.ipynb | 34 ++-
20 files changed, 1005 insertions(+), 224 deletions(-)
create mode 100644 d123/common/utils/mixin.py
rename d123/{dataset/dataset_specific/av2 => geometry/torch}/.gitkeep (100%)
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
new file mode 100644
index 00000000..5e7ecc0e
--- /dev/null
+++ b/d123/common/utils/mixin.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+import abc
+import copy as pycopy
+from functools import cached_property
+
+import numpy as np
+import numpy.typing as npt
+
+
+class ArrayMixin(abc.ABC):
+ """Abstract base class for geometric entities."""
+
+ @cached_property
+ @abc.abstractmethod
+ def array(self) -> npt.NDArray[np.float64]:
+ """The array representation of the geometric entity."""
+
+    def __array__(self, dtype: npt.DTypeLike = None, copy: bool = False) -> npt.NDArray:
+ array = self.array
+ return array if dtype is None else array.astype(dtype=dtype, copy=copy)
+
+ def __len__(self) -> int:
+ """Return the length of the array."""
+ return len(self.array)
+
+ def __getitem__(self, key):
+ """Allow indexing into the array."""
+ return self.array[key]
+
+ def __eq__(self, other) -> bool:
+ """Equality comparison based on array values."""
+ if isinstance(other, ArrayMixin):
+ return np.array_equal(self.array, other.array)
+ return False
+
+ def shape(self) -> tuple:
+ """Return the shape of the array."""
+ return self.array.shape
+
+ def tolist(self) -> list:
+ """Convert the array to a Python list."""
+ return self.array.tolist()
+
+ def copy(self) -> ArrayMixin:
+ """Return a copy of the object with a copied array."""
+ obj = pycopy.copy(self)
+ obj.array = self.array.copy()
+ return obj
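
To illustrate what the mixin provides, a short sketch with a hypothetical subclass (`Pair` is made up for illustration; `eq=False` keeps the mixin's array-based `__eq__`):

    from dataclasses import dataclass
    from functools import cached_property

    import numpy as np
    import numpy.typing as npt

    from d123.common.utils.mixin import ArrayMixin


    @dataclass(eq=False)
    class Pair(ArrayMixin):
        a: float
        b: float

        @cached_property
        def array(self) -> npt.NDArray[np.float64]:
            return np.array([self.a, self.b], dtype=np.float64)


    p = Pair(1.0, 2.0)
    assert np.array_equal(np.asarray(p), [1.0, 2.0])  # via __array__
    assert p[0] == 1.0 and len(p) == 2                # via __getitem__ / __len__
    assert p == Pair(1.0, 2.0)                        # array-valued equality
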
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index b71c4fae..6b612c4e 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -1,32 +1,60 @@
from __future__ import annotations
+from typing import Dict
from dataclasses import dataclass
from functools import cached_property
import numpy as np
import numpy.typing as npt
-import shapely
+import shapely.geometry as geom
-from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
+from d123.geometry.point import Point2D, Point3D
from d123.geometry.se import StateSE2, StateSE3
-from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array
-
-# TODO: Reconsider naming SE2 and SE3 hierarchies. E.g. would inheritance be a better approach?
+from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
@dataclass
-class BoundingBoxSE2:
+class BoundingBoxSE2(ArrayMixin):
+ """
+ Rotated bounding box in 2D defined by center (StateSE2), length and width.
+
+ Example:
+ >>> from d123.geometry import StateSE2
+ >>> bbox = BoundingBoxSE2(center=StateSE2(1.0, 2.0, 0.5), length=4.0, width=2.0)
+ >>> bbox.array
+ array([1. , 2. , 0.5, 4. , 2. ])
+ >>> bbox.corners_array.shape
+ (4, 2)
+ >>> bbox.shapely_polygon.area
+ 8.0
+ """
center: StateSE2
length: float
width: float
- @cached_property
- def shapely_polygon(self) -> shapely.geometry.Polygon:
- return shapely.geometry.Polygon(self.corners_array)
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE2:
+ """Create a BoundingBoxSE2 from a numpy array, index by :class:`~d123.geometry.BoundingBoxSE2Index`.
- @property
+ :param array: A 1D numpy array containing the bounding box parameters.
+ :return: A BoundingBoxSE2 instance.
+ """
+ assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE2Index)
+ return BoundingBoxSE2(
+ center=StateSE2.from_array(array[BoundingBoxSE2Index.SE2]),
+ length=array[BoundingBoxSE2Index.LENGTH],
+ width=array[BoundingBoxSE2Index.WIDTH],
+ )
+
+ @cached_property
def array(self) -> npt.NDArray[np.float64]:
+ """Converts the BoundingBoxSE2 instance to a numpy array, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
+
+ :return: A numpy array of shape (5,) containing the bounding box parameters [x, y, yaw, length, width].
+ """
array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64)
array[BoundingBoxSE2Index.X] = self.center.x
array[BoundingBoxSE2Index.Y] = self.center.y
@@ -35,17 +63,56 @@ def array(self) -> npt.NDArray[np.float64]:
array[BoundingBoxSE2Index.WIDTH] = self.width
return array
+ @cached_property
+ def shapely_polygon(self) -> geom.Polygon:
+ """Return a Shapely polygon representation of the bounding box.
+
+ :return: A Shapely polygon representing the bounding box.
+ """
+ return geom.Polygon(self.corners_array)
+
@property
def bounding_box_se2(self) -> BoundingBoxSE2:
+ """Returns bounding box itself for polymorphism.
+
+ :return: A BoundingBoxSE2 instance representing the 2D bounding box.
+ """
return self
- @property
+ @cached_property
def corners_array(self) -> npt.NDArray[np.float64]:
+ """Returns the corner points of the bounding box as a numpy array.
+
+ :return: A numpy array of shape (4, 2) containing the corner points of the bounding box, \
+ indexed by :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`.
+ """
return bbse2_array_to_corners_array(self.array)
+ @property
+ def corners_dict(self) -> Dict[Corners2DIndex, Point2D]:
+ """Returns the corner points of the bounding box as a dictionary.
+
+ :return: A dictionary mapping :class:`~d123.geometry.Corners2DIndex` to :class:`~d123.geometry.Point2D` instances.
+ """
+ corners_array = self.corners_array
+ return {index: Point2D.from_array(corners_array[index]) for index in Corners2DIndex}
+
@dataclass
-class BoundingBoxSE3:
+class BoundingBoxSE3(ArrayMixin):
+ """
+ Rotated bounding box in 3D defined by center (StateSE3), length, width and height.
+
+ Example:
+ >>> from d123.geometry import StateSE3
+ >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3), length=4.0, width=2.0, height=1.5)
+ >>> bbox.array
+ array([1. , 2. , 3. , 0.1, 0.2, 0.3, 4. , 2. , 1.5])
+ >>> bbox.bounding_box_se2.array
+ array([1. , 2. , 0.3, 4. , 2. ])
+ >>> bbox.shapely_polygon.area
+ 8.0
+ """
center: StateSE3
length: float
@@ -54,27 +121,27 @@ class BoundingBoxSE3:
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE3:
- return cls(
+ """Create a BoundingBoxSE3 from a numpy array.
+
+ :param array: A 1D numpy array containing the bounding box parameters, indexed by \
+ :class:`~d123.geometry.BoundingBoxSE3Index`.
+ :return: A BoundingBoxSE3 instance.
+ """
+ assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE3Index)
+ return BoundingBoxSE3(
center=StateSE3.from_array(array[BoundingBoxSE3Index.STATE_SE3]),
length=array[BoundingBoxSE3Index.LENGTH],
width=array[BoundingBoxSE3Index.WIDTH],
height=array[BoundingBoxSE3Index.HEIGHT],
)
- @property
- def bounding_box_se2(self) -> BoundingBoxSE2:
- return BoundingBoxSE2(
- center=StateSE2(self.center.x, self.center.y, self.center.yaw),
- length=self.length,
- width=self.width,
- )
-
- @property
- def center_se3(self) -> StateSE3:
- return self.center
-
- @property
+ @cached_property
def array(self) -> npt.NDArray[np.float64]:
+ """Convert the BoundingBoxSE3 instance to a numpy array.
+
+ :return: A 1D numpy array containing the bounding box parameters, indexed by \
+ :class:`~d123.geometry.BoundingBoxSE3Index`.
+ """
array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64)
array[BoundingBoxSE3Index.X] = self.center.x
array[BoundingBoxSE3Index.Y] = self.center.y
@@ -88,8 +155,51 @@ def array(self) -> npt.NDArray[np.float64]:
return array
@property
- def shapely_polygon(self) -> shapely.geometry.Polygon:
+ def bounding_box_se2(self) -> BoundingBoxSE2:
+ """Converts the 3D bounding box to a 2D bounding box by dropping the z, roll and pitch components.
+
+ :return: A BoundingBoxSE2 instance.
+ """
+ return BoundingBoxSE2(
+ center=StateSE2(self.center.x, self.center.y, self.center.yaw),
+ length=self.length,
+ width=self.width,
+ )
+
+ @property
+ def center_se3(self) -> StateSE3:
+ """Returns the center of the bounding box as a StateSE3 instance.
+
+ :return: The center of the bounding box as a StateSE3 instance.
+ """
+ return self.center
+
+ @property
+ def shapely_polygon(self) -> geom.Polygon:
+ """Return a Shapely polygon representation of the 2D projection of the bounding box.
+
+ :return: A shapely polygon representing the 2D bounding box.
+ """
return self.bounding_box_se2.shapely_polygon
+ @cached_property
+ def corners_array(self) -> npt.NDArray[np.float64]:
+ """Returns the corner points of the bounding box as a numpy array, shape (8, 3).
+
+ :return: A numpy array of shape (8, 3) containing the corner points of the bounding box, \
+ indexed by :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`.
+ """
+ return bbse3_array_to_corners_array(self.array)
+
+ @cached_property
+ def corners_dict(self) -> Dict[Corners3DIndex, Point3D]:
+ """Returns the corner points of the bounding box as a dictionary.
+
+ :return: A dictionary mapping :class:`~d123.geometry.Corners3DIndex` to \
+ :class:`~d123.geometry.Point3D` instances.
+ """
+ corners_array = self.corners_array
+ return {index: Point3D.from_array(corners_array[index]) for index in Corners3DIndex}
+
BoundingBox = BoundingBoxSE2 | BoundingBoxSE3
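
Beyond the doctests above, the new corners_dict gives named corner access. A small sketch (the expected values assume the usual front=+x, left=+y convention for a yaw-0 box):

    from d123.geometry import StateSE2
    from d123.geometry.bounding_box import BoundingBoxSE2
    from d123.geometry.geometry_index import Corners2DIndex

    bbox = BoundingBoxSE2(center=StateSE2(0.0, 0.0, 0.0), length=4.0, width=2.0)
    front_left = bbox.corners_dict[Corners2DIndex.FRONT_LEFT]
    print(front_left.x, front_left.y)  # expected: 2.0 1.0
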
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index a8652b37..f77951c2 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -4,6 +4,10 @@
class Point2DIndex(IntEnum):
+ """
+ Indexes array-like representations of 2D points (x,y).
+ """
+
X = 0
Y = 1
@@ -13,11 +17,23 @@ def XY(cls) -> slice:
class Vector2DIndex(IntEnum):
+ """
+ Indexes array-like representations of 2D vectors (x,y).
+ """
+
X = 0
Y = 1
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
class StateSE2Index(IntEnum):
+ """
+ Indexes array-like representations of SE2 states (x,y,yaw).
+ """
+
X = 0
Y = 1
YAW = 2
@@ -28,6 +44,9 @@ def XY(cls) -> slice:
class Point3DIndex(IntEnum):
+ """
+ Indexes array-like representations of 3D points (x,y,z).
+ """
X = 0
Y = 1
@@ -37,14 +56,26 @@ class Point3DIndex(IntEnum):
def XY(cls) -> slice:
return slice(cls.X, cls.Y + 1)
+ @classproperty
+ def XYZ(cls) -> slice:
+ return slice(cls.X, cls.Z + 1)
+
class Vector3DIndex(IntEnum):
+ """
+ Indexes array-like representations of 3D vectors (x,y,z).
+ """
+
X = 0
Y = 1
Z = 2
class StateSE3Index(IntEnum):
+ """
+ Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw).
+ TODO: Use quaternions for rotation.
+ """
X = 0
Y = 1
@@ -67,6 +98,10 @@ def ROTATION_XYZ(cls) -> slice:
class BoundingBoxSE2Index(IntEnum):
+ """
+ Indexes array-like representations of rotated 2D bounding boxes (x,y,yaw,length,width).
+ """
+
X = 0
Y = 1
YAW = 2
@@ -83,6 +118,10 @@ def SE2(cls) -> slice:
class Corners2DIndex(IntEnum):
+ """
+ Indexes the corners of a BoundingBoxSE2 in the order: front-left, front-right, back-right, back-left.
+ """
+
FRONT_LEFT = 0
FRONT_RIGHT = 1
BACK_RIGHT = 2
@@ -90,6 +129,11 @@ class Corners2DIndex(IntEnum):
class BoundingBoxSE3Index(IntEnum):
+ """
+ Indexes array-like representations of rotated 3D bounding boxes (x,y,z,roll,pitch,yaw,length,width,height).
+ TODO: Use quaternions for rotation.
+ """
+
X = 0
Y = 1
Z = 2
@@ -112,8 +156,18 @@ def STATE_SE3(cls) -> slice:
def ROTATION_XYZ(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
+ @classproperty
+ def EXTENT(cls) -> slice:
+ return slice(cls.LENGTH, cls.HEIGHT + 1)
+
class Corners3DIndex(IntEnum):
+ """
+ Indexes the corners of a BoundingBoxSE3 in the order:
+ front-left-bottom, front-right-bottom, back-right-bottom, back-left-bottom,
+ front-left-top, front-right-top, back-right-top, back-left-top.
+ """
+
FRONT_LEFT_BOTTOM = 0
FRONT_RIGHT_BOTTOM = 1
BACK_RIGHT_BOTTOM = 2
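
Since these are IntEnums, the members index arrays directly and the classproperty slices pull out sub-vectors; a brief sketch:

    import numpy as np

    from d123.geometry.geometry_index import StateSE3Index

    state = np.zeros(len(StateSE3Index))
    state[StateSE3Index.X] = 1.0
    state[StateSE3Index.ROTATION_XYZ] = [0.1, 0.2, 0.3]  # roll, pitch, yaw
    print(state[StateSE3Index.ROTATION_XYZ])             # -> [0.1 0.2 0.3]
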
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index 2a8085d6..d8df5a1d 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import Dict, List, Literal, Optional, Sequence, Union
+from typing import Dict, List, Literal, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.typing as npt
@@ -8,7 +8,7 @@
from shapely.geometry.base import BaseGeometry
from shapely.strtree import STRtree
-# TODO: Figure out if a 3D equivalent is needed.
+from d123.geometry.geometry_index import Point2DIndex
class OccupancyMap2D:
@@ -18,14 +18,14 @@ def __init__(
ids: Optional[Union[List[str], List[int]]] = None,
node_capacity: int = 10,
):
- """
- Constructor of PDMOccupancyMap
- :param geometries: list/array of polygons
- :param ids: optional list of geometry identifiers
+ """Constructs a 2D occupancy map of shapely geometries using an str-tree for efficient spatial queries.
+
+ :param geometries: list/array of shapely geometries
+ :param ids: optional list of geometry identifiers, either strings or integers
:param node_capacity: max number of child nodes in str-tree, defaults to 10
"""
+
assert ids is None or len(ids) == len(geometries), "Length of ids must match length of geometries"
- # assert len(tokens) == len(geometries)
self._ids: Union[List[str], List[int]] = (
ids if ids is not None else [str(idx) for idx in range(len(geometries))]
@@ -38,51 +38,59 @@ def __init__(
@classmethod
def from_dict(cls, geometry_dict: Dict[Union[str, int], BaseGeometry], node_capacity: int = 10) -> OccupancyMap2D:
+ """Constructs a 2D occupancy map from a dictionary of geometries.
+
+ :param geometry_dict: Dictionary mapping geometry identifiers to shapely geometries
+ :param node_capacity: Max number of child nodes in str-tree, defaults to 10
+ :return: OccupancyMap2D instance
+ """
ids = list(geometry_dict.keys())
geometries = list(geometry_dict.values())
return cls(geometries=geometries, ids=ids, node_capacity=node_capacity)
def __getitem__(self, id: Union[str, int]) -> BaseGeometry:
- """
- Retrieves geometry of token.
- :param token: geometry identifier
- :return: Geometry of token
+ """Retrieves geometry given an ID.
+
+ :param id: geometry identifier
+ :return: Geometry of ID.
"""
return self._geometries[self._id_to_idx[id]]
def __len__(self) -> int:
"""
- Number of geometries in the occupancy map
- :return: int
+ :return: Number of geometries in the occupancy map.
"""
return len(self._ids)
@property
def ids(self) -> Union[List[str], List[int]]:
- """
- Getter for track tokens in occupancy map
- :return: list of strings
+ """Getter for geometry IDs in occupancy map
+
+ :return: list of IDs
"""
return self._ids
@property
def geometries(self) -> Sequence[BaseGeometry]:
+ """Getter for geometries in occupancy map.
+ :return: list of geometries
+ """
return self._geometries
@property
def token_to_idx(self) -> Dict[Union[int, str], int]:
- """
- Getter for track tokens in occupancy map
- :return: dictionary of tokens and indices
+ """Mapping from geometry IDs to indices in the occupancy map.
+
+ :return: dictionary of IDs and indices
"""
return self._id_to_idx
def intersects(self, geometry: BaseGeometry) -> Union[List[str], List[int]]:
- """
- Searches for intersecting geometries in the occupancy map
+ """Searches for intersecting geometries in the occupancy map.
+
:param geometry: geometries to query
- :return: list of tokens for intersecting geometries
+ :return: list of IDs for intersecting geometries
"""
indices = self.query(geometry, predicate="intersects")
return [self._ids[idx] for idx in indices]
@@ -95,11 +103,17 @@ def query(
] = None,
distance: Optional[float] = None,
) -> npt.NDArray[np.int64]:
- """
- Function to directly calls shapely's query function on str-tree
- :param geometry: geometries to query
- :param predicate: see shapely, defaults to None
- :return: query output
+ """Queries the str-tree for geometries that match the given predicate with the input geometry.
+
+ :param geometry: Geometry or array_like
+ :param predicate: {None, 'intersects', 'within', 'contains', 'overlaps', 'crosses', 'touches', 'covers', \
+ 'covered_by', 'contains_properly', 'dwithin'}, defaults to None
+ :param distance: number or array_like, defaults to None.
+        :return: ndarray of shape (n,) if geometry is a scalar, containing tree geometry indices; \
+            ndarray of shape (2, n) if geometry is array_like, where the first subarray contains \
+            input geometry indices and the second contains tree geometry indices.
+        """
return self._str_tree.query(geometry, predicate=predicate, distance=distance)
@@ -110,7 +124,16 @@ def query_nearest(
return_distance: bool = False,
exclusive: bool = False,
all_matches: bool = True,
- ):
+ ) -> Union[npt.NDArray[np.int64], Tuple[npt.NDArray[np.int64], npt.NDArray[np.float64]]]:
+ """Queries the str-tree for the nearest geometry to the input geometry.
+
+ :param geometry: The input geometry to query.
+ :param max_distance: The maximum distance to consider, defaults to None.
+ :param return_distance: Whether to return the distance to the nearest geometry, defaults to False.
+ :param exclusive: Whether to exclude the input geometry from the results, defaults to False.
+ :param all_matches: Whether to return all matching geometries, defaults to True.
+ :return: The nearest geometry or geometries.
+ """
return self._str_tree.query_nearest(
geometry,
max_distance=max_distance,
@@ -119,14 +142,16 @@ def query_nearest(
all_matches=all_matches,
)
- def points_in_polygons(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np.bool_]:
- """
- Determines wether input-points are in polygons of the occupancy map
- :param points: input-points
+ def contains_vectorized(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np.bool_]:
+ """Determines wether input-points are in geometries (i.e. polygons) of the occupancy map.
+ NOTE: This function can be significantly faster than using the str-tree, if the number of geometries is
+ relatively small compared to the number of input-points.
+
+ :param points: array of shape (num_points, 2), indexed by :class:`~d123.geometry.Point2DIndex`.
:return: boolean array of shape (polygons, input-points)
"""
output = np.zeros((len(self._geometries), len(points)), dtype=bool)
- for i, polygon in enumerate(self._geometries):
- output[i] = shapely.vectorized.contains(polygon, points[:, 0], points[:, 1])
+ for i, geometry in enumerate(self._geometries):
+ output[i] = shapely.vectorized.contains(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
return output
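
A compact usage sketch of the occupancy map (geometries and IDs are made up for illustration):

    import numpy as np
    from shapely.geometry import Point, box

    from d123.geometry.occupancy_map import OccupancyMap2D

    omap = OccupancyMap2D.from_dict({
        "lane_a": box(0.0, 0.0, 4.0, 2.0),
        "lane_b": box(5.0, 0.0, 9.0, 2.0),
    })
    print(omap.intersects(Point(1.0, 1.0)))  # -> ['lane_a']

    points = np.array([[1.0, 1.0], [6.0, 1.0], [20.0, 20.0]])
    print(omap.contains_vectorized(points))  # boolean array of shape (2, 3)
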
diff --git a/d123/geometry/point.py b/d123/geometry/point.py
index c12cc22e..5641cf17 100644
--- a/d123/geometry/point.py
+++ b/d123/geometry/point.py
@@ -1,18 +1,23 @@
from __future__ import annotations
from dataclasses import dataclass
+from functools import cached_property
from typing import Iterable
import numpy as np
import numpy.typing as npt
import shapely.geometry as geom
+from d123.common.utils.mixin import ArrayMixin
from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
@dataclass
-class Point2D:
- """Class to represents 2D points."""
+class Point2D(ArrayMixin):
+ """Class to represents 2D points.
+
+ :return: A Point2D instance.
+ """
x: float # [m] location
y: float # [m] location
@@ -20,15 +25,23 @@ class Point2D:
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
+ """Constructs a Point2D from a numpy array.
+
+ :param array: Array of shape (2,) representing the point coordinates [x, y], indexed by \
+ :class:`~d123.geometry.Point2DIndex`.
+ :return: A Point2D instance.
+ """
+
assert array.ndim == 1
assert array.shape[0] == len(Point2DIndex)
return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
- @property
+ @cached_property
def array(self) -> npt.NDArray[np.float64]:
- """
- Convert vector to array
- :return: array containing [x, y]
+ """The array representation of the point.
+
+ :return: A numpy array of shape (2,) containing the point coordinates [x, y], indexed by \
+ :class:`~d123.geometry.Point2DIndex`.
"""
array = np.zeros(len(Point2DIndex), dtype=np.float64)
array[Point2DIndex.X] = self.x
@@ -37,12 +50,14 @@ def array(self) -> npt.NDArray[np.float64]:
@property
def shapely_point(self) -> geom.Point:
+ """The Shapely Point representation of the 2D point.
+
+ :return: A Shapely Point representation of the 2D point.
+ """
return geom.Point(self.x, self.y)
def __iter__(self) -> Iterable[float]:
- """
- :return: iterator of tuples (x, y)
- """
+ """Iterator over point coordinates."""
return iter((self.x, self.y))
def __hash__(self) -> int:
@@ -51,8 +66,8 @@ def __hash__(self) -> int:
@dataclass
-class Point3D:
- """Class to represents 2D points."""
+class Point3D(ArrayMixin):
+ """Class to represents 3D points."""
x: float # [m] location
y: float # [m] location
@@ -61,17 +76,23 @@ class Point3D:
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D":
+ """Constructs a Point3D from a numpy array.
+
+ :param array: Array of shape (3,) representing the point coordinates [x, y, z], indexed by \
+ :class:`~d123.geometry.Point3DIndex`.
+ :return: A Point3D instance.
+ """
assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}"
assert array.shape[0] == len(
Point3DIndex
), f"Array must have the same length as Point3DIndex, got shape {array.shape}"
return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
- @property
+ @cached_property
def array(self) -> npt.NDArray[np.float64]:
- """
- Convert vector to array
- :return: array containing [x, y]
+ """Converts the Point3D instance to a numpy array, indexed by :class:`~d123.geometry.Point3DIndex`.
+
+ :return: A numpy array of shape (3,) containing the point coordinates [x, y, z].
"""
array = np.zeros(len(Point3DIndex), dtype=np.float64)
array[Point3DIndex.X] = self.x
@@ -81,16 +102,23 @@ def array(self) -> npt.NDArray[np.float64]:
@property
def point_2d(self) -> Point2D:
+ """The 2D projection of the 3D point.
+
+ :return: A Point2D instance representing the 2D projection of the 3D point.
+ """
return Point2D(self.x, self.y)
@property
def shapely_point(self) -> geom.Point:
+ """The Shapely Point representation of the 3D point. \
+ This geometry contains the z-coordinate, but many Shapely operations ignore it.
+
+ :return: A Shapely Point representation of the 3D point.
+ """
return geom.Point(self.x, self.y, self.z)
def __iter__(self) -> Iterable[float]:
- """
- :return: iterator of tuples (x, y)
- """
+ """Iterator over the point coordinates (x, y, z)."""
return iter((self.x, self.y, self.z))
def __hash__(self) -> int:
diff --git a/d123/geometry/polyline.py b/d123/geometry/polyline.py
index 365b458e..3fee304d 100644
--- a/d123/geometry/polyline.py
+++ b/d123/geometry/polyline.py
@@ -9,8 +9,10 @@
import shapely.geometry as geom
from scipy.interpolate import interp1d
-from d123.geometry.point import Point2D, Point2DIndex, Point3D, Point3DIndex
-from d123.geometry.se import StateSE2, StateSE2Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index
+from d123.geometry.point import Point2D, Point3D
+from d123.geometry.se import StateSE2
from d123.geometry.utils.constants import DEFAULT_Z
from d123.geometry.utils.polyline_utils import get_linestring_yaws, get_path_progress
from d123.geometry.utils.rotation_utils import normalize_angle
@@ -20,12 +22,18 @@
@dataclass
-class Polyline2D:
+class Polyline2D(ArrayMixin):
+ """Represents a interpolatable 2D polyline."""
linestring: geom.LineString
@classmethod
def from_linestring(cls, linestring: geom.LineString) -> Polyline2D:
+ """Creates a Polyline2D from a Shapely LineString. If the LineString has Z-coordinates, they are ignored.
+
+ :param linestring: A Shapely LineString object.
+ :return: A Polyline2D instance.
+ """
if linestring.has_z:
linestring_ = geom_creation.linestrings(*linestring.xy)
else:
@@ -34,6 +42,13 @@ def from_linestring(cls, linestring: geom.LineString) -> Polyline2D:
@classmethod
def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> Polyline2D:
+ """Creates a Polyline2D from a numpy array.
+
+ :param polyline_array: A numpy array of shape (N, 2) or (N, 3), e.g. indexed by \
+ :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.Point3DIndex`.
+ :raises ValueError: If the input array is not of the expected shape.
+ :return: A Polyline2D instance.
+ """
assert polyline_array.ndim == 2
linestring: Optional[geom.LineString] = None
if polyline_array.shape[-1] == len(Point2DIndex):
@@ -44,37 +59,87 @@ def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> Polyline2D:
raise ValueError("Array must have shape (N, 2) or (N, 3) for Point2D or Point3D respectively.")
return Polyline2D(linestring)
+    @classmethod
+    def from_discrete_points(cls, discrete_points: List[Point2D]) -> Polyline2D:
+ """Creates a Polyline2D from a list of discrete 2D points.
+
+ :param discrete_points: A list of Point2D instances.
+ :return: A Polyline2D instance.
+ """
+ return Polyline2D.from_array(np.array(discrete_points, dtype=np.float64))
+
@property
def array(self) -> npt.NDArray[np.float64]:
- return np.array(self.linestring.coords, dtype=np.float64)
+ """Converts the polyline to a numpy array, indexed by :class:`~d123.geometry.Point2DIndex`.
+
+ :return: A numpy array of shape (N, 2) representing the polyline.
+ """
+ x, y = self.linestring.xy
+ array = np.zeros((len(x), len(Point2DIndex)), dtype=np.float64)
+ array[:, Point2DIndex.X] = x
+ array[:, Point2DIndex.Y] = y
+ return array
@property
- def polyline_se2(self) -> Polyline3D:
+ def polyline_se2(self) -> PolylineSE2:
+ """Converts the 2D polyline to a 2D SE(2) polyline and retrieves the yaw angles.
+
+ :return: A PolylineSE2 instance representing the 2D polyline.
+ """
return PolylineSE2.from_linestring(self.linestring)
@property
def length(self) -> float:
+ """Returns the length of the polyline.
+
+ :return: The length of the polyline.
+ """
return self.linestring.length
- def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[Point2D, npt.NDArray[np.float64]]:
+ def interpolate(
+ self,
+ distances: Union[float, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> Union[Point2D, npt.NDArray[np.float64]]:
+ """Interpolates the polyline at the given distances.
+
+        :param distances: The distances at which to interpolate the polyline.
+        :param normalized: Whether the distances are fractions of the polyline length, defaults to False.
+        :return: The interpolated point(s) on the polyline.
+        """
+ distances_ = distances * self.length if normalized else distances
+
if isinstance(distances, float) or isinstance(distances, int):
- point = self.linestring.interpolate(distances)
+            point = self.linestring.interpolate(distances_)  # distances_ is already de-normalized above
return Point2D(point.x, point.y)
else:
- distances = np.asarray(distances, dtype=np.float64)
- points = self.linestring.interpolate(distances)
+            distances_ = np.asarray(distances_, dtype=np.float64)
+            points = self.linestring.interpolate(distances_)  # distances_ is already de-normalized above
return np.array([[p.x, p.y] for p in points], dtype=np.float64)
- def project(self, point: Union[Point2D, npt.NDArray[np.float64]]) -> Union[Point2D, npt.NDArray[np.float64]]:
+ def project(
+ self,
+ point: Union[geom.Point, Point2D, StateSE2, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> npt.NDArray[np.float64]:
+ """Projects a point onto the polyline and returns the distance along the polyline to the closest point.
+
+ :param point: The point to project onto the polyline.
+ :param normalized: Whether to return the normalized distance, defaults to False.
+ :return: The distance along the polyline to the closest point.
+ """
if isinstance(point, Point2D):
point_ = point.array
+ elif isinstance(point, StateSE2):
+ point_ = point.array[StateSE2Index.XY]
+ elif isinstance(point, geom.Point):
+ point_ = np.array(point.coords[0], dtype=np.float64)
else:
point_ = np.array(point, dtype=np.float64)
- return self.linestring.project(point_)
+        return self.linestring.project(geom.Point(point_), normalized=normalized)  # project expects a geometry
@dataclass
-class PolylineSE2:
+class PolylineSE2(ArrayMixin):
+ """Represents a interpolatable SE2 polyline."""
se2_array: npt.NDArray[np.float64]
linestring: Optional[geom.LineString] = None
@@ -94,6 +159,11 @@ def __post_init__(self):
@classmethod
def from_linestring(cls, linestring: geom.LineString) -> PolylineSE2:
+ """Creates a PolylineSE2 from a LineString. This requires computing the yaw angles along the path.
+
+ :param linestring: The LineString to convert.
+ :return: A PolylineSE2 representing the same path as the LineString.
+ """
points_2d = np.array(linestring.coords, dtype=np.float64)[..., StateSE2Index.XY]
se2_array = np.zeros((len(points_2d), len(StateSE2Index)), dtype=np.float64)
se2_array[:, StateSE2Index.XY] = points_2d
@@ -102,6 +172,13 @@ def from_linestring(cls, linestring: geom.LineString) -> PolylineSE2:
@classmethod
def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> PolylineSE2:
+ """Creates a PolylineSE2 from a numpy array.
+
+        :param polyline_array: The input numpy array representing the polyline, indexed either by \
+ :class:`~d123.geometry.Point2DIndex` or :class:`~d123.geometry.StateSE2Index`.
+ :raises ValueError: If the input array is not of the expected shape.
+ :return: A PolylineSE2 representing the same path as the input array.
+ """
assert polyline_array.ndim == 2
if polyline_array.shape[-1] == len(Point2DIndex):
se2_array = np.zeros((len(polyline_array), len(StateSE2Index)), dtype=np.float64)
@@ -110,19 +187,41 @@ def from_array(cls, polyline_array: npt.NDArray[np.float32]) -> PolylineSE2:
elif polyline_array.shape[-1] == len(StateSE2Index):
se2_array = np.array(polyline_array, dtype=np.float64)
else:
- raise ValueError
+ raise ValueError("Invalid polyline array shape.")
return PolylineSE2(se2_array)
@classmethod
def from_discrete_se2(cls, discrete_se2: List[StateSE2]) -> PolylineSE2:
- return PolylineSE2(np.array([se2.array for se2 in discrete_se2], dtype=np.float64))
+ """Creates a PolylineSE2 from a list of discrete SE2 states.
+
+ :param discrete_se2: The list of discrete SE2 states.
+ :return: A PolylineSE2 representing the same path as the discrete SE2 states.
+ """
+ return PolylineSE2.from_array(np.array(discrete_se2, dtype=np.float64))
@property
def length(self) -> float:
+ """Returns the length of the polyline.
+
+ :return: The length of the polyline.
+ """
return float(self._progress[-1])
- def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[StateSE2, npt.NDArray[np.float64]]:
- clipped_distances = np.clip(distances, 1e-8, self.length)
+ def interpolate(
+ self,
+ distances: Union[float, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> Union[StateSE2, npt.NDArray[np.float64]]:
+ """Interpolates the polyline at the given distances.
+
+ :param distances: The distances along the polyline to interpolate.
+ :param normalized: Whether the distances are normalized (0 to 1), defaults to False
+        :return: The interpolated StateSE2 or an array of interpolated states, depending on the input type.
+ """
+
+ distances_ = distances * self.length if normalized else distances
+ clipped_distances = np.clip(distances_, 1e-8, self.length)
+
interpolated_se2_array = self._interpolator(clipped_distances)
interpolated_se2_array[..., StateSE2Index.YAW] = normalize_angle(interpolated_se2_array[..., StateSE2Index.YAW])
@@ -132,27 +231,41 @@ def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union
return interpolated_se2_array
def project(
- self, point: Union[geom.Point, Point2D, npt.NDArray[np.float64]]
- ) -> Union[Point2D, npt.NDArray[np.float64]]:
+ self,
+ point: Union[geom.Point, Point2D, StateSE2, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> npt.NDArray[np.float64]:
+ """Projects a point onto the polyline and returns the distance along the polyline to the closest point.
+
+ :param point: The point to project onto the polyline.
+ :param normalized: Whether to return the normalized distance, defaults to False.
+ :return: The distance along the polyline to the closest point.
+ """
if isinstance(point, Point2D):
- point_ = geom.Point(point.x, point.y)
- elif isinstance(point, np.ndarray) and point.shape[-1] == 2:
- point_ = geom_creation.points(point)
+ point_ = point.array
+ elif isinstance(point, StateSE2):
+ point_ = point.array[StateSE2Index.XY]
elif isinstance(point, geom.Point):
- point_ = point
+ point_ = np.array(point.coords[0], dtype=np.float64)
else:
- raise ValueError("Point must be a Point2D, geom.Point, or a 2D numpy array.")
-
- return self.linestring.project(point_)
+ point_ = np.array(point, dtype=np.float64)
+ return self.linestring.project(point_, normalized=normalized)
@dataclass
-class Polyline3D:
+class Polyline3D(ArrayMixin):
+ """Represents a interpolatable 3D polyline."""
linestring: geom.LineString
@classmethod
def from_linestring(cls, linestring: geom.LineString) -> Polyline3D:
+ """Creates a Polyline3D from a Shapely LineString. If the LineString does not have Z-coordinates, \
+ a default Z-value is added.
+
+ :param linestring: The input LineString.
+ :return: A Polyline3D instance.
+ """
return (
Polyline3D(linestring)
if linestring.has_z
@@ -161,27 +274,61 @@ def from_linestring(cls, linestring: geom.LineString) -> Polyline3D:
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> Polyline3D:
- assert array.ndim == 2 and array.shape[1] == 3, "Array must be 2D with shape (N, 3)"
+ """Creates a Polyline3D from a numpy array.
+
+ :param array: A numpy array of shape (N, 3) representing 3D points, e.g. indexed by \
+ :class:`~d123.geometry.Point3DIndex`.
+ :return: A Polyline3D instance.
+ """
+ assert array.ndim == 2 and array.shape[1] == 3, "Array must be 2D with shape (N, 3)"
linestring = geom_creation.linestrings(*array.T)
return Polyline3D(linestring)
@property
def polyline_2d(self) -> Polyline2D:
+ """Converts the 3D polyline to a 2D polyline by dropping the Z-coordinates.
+
+ :return: A Polyline2D instance.
+ """
return Polyline2D(geom_creation.linestrings(*self.linestring.xy))
@property
def polyline_se2(self) -> PolylineSE2:
+ """Converts the 3D polyline to a 2D SE(2) polyline.
+
+ :return: A PolylineSE2 instance.
+ """
return PolylineSE2.from_linestring(self.linestring)
@property
- def array(self) -> Polyline2D:
+ def array(self) -> npt.NDArray[np.float64]:
+ """Converts the 3D polyline to its discrete 3D points.
+
+ :return: A numpy array of shape (N, 3), indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
return np.array(self.linestring.coords, dtype=np.float64)
@property
def length(self) -> float:
+ """Returns the length of the 3D polyline.
+
+ :return: The length of the polyline.
+ """
return self.linestring.length
- def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union[Point3D, npt.NDArray[np.float64]]:
+ def interpolate(
+ self,
+ distances: Union[float, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> Union[Point3D, npt.NDArray[np.float64]]:
+ """Interpolates the 3D polyline at the given distances.
+
+ :param distances: A float or numpy array of distances along the polyline.
+ :param normalized: Whether to interpret the distances as fractions of the length.
+ :return: A Point3D instance or a numpy array of shape (N, 3) representing the interpolated points.
+ """
+ distances = distances * self.length if normalized else distances
+
if isinstance(distances, float) or isinstance(distances, int):
point = self.linestring.interpolate(distances)
return Point3D(point.x, point.y, point.z)
@@ -190,8 +337,36 @@ def interpolate(self, distances: Union[float, npt.NDArray[np.float64]]) -> Union
points = self.linestring.interpolate(distances)
return np.array([[p.x, p.y, p.z] for p in points], dtype=np.float64)
+ def project(
+ self,
+ point: Union[geom.Point, Point2D, Point3D, StateSE2, npt.NDArray[np.float64]],
+ normalized: bool = False,
+ ) -> npt.NDArray[np.float64]:
+ """Projects a point onto the 3D polyline and returns the distance along the polyline to the closest point.
+
+ :param point: The point to project.
+ :param normalized: Whether to return normalized distances, defaults to False.
+ :return: The distance along the polyline to the closest point.
+ """
+ if isinstance(point, Point2D):
+ point_ = point.array
+ elif isinstance(point, StateSE2):
+ point_ = point.array[StateSE2Index.XY]
+ elif isinstance(point, Point3D):
+ point_ = point.array[Point3DIndex.XYZ]
+ elif isinstance(point, geom.Point):
+ point_ = np.array(point.coords[0], dtype=np.float64)
+ else:
+ point_ = np.array(point, dtype=np.float64)
+ return self.linestring.project(point_, normalized=normalized)
+
@dataclass
class PolylineSE3:
- # TODO: implement this class
+ # TODO: Implement PolylineSE3 once quaternions are used in StateSE3
+ # Interpolating along SE3 states (i.e., 3D position + orientation) is meaningful,
+ # but more complex than SE2 due to 3D rotations (quaternions or rotation matrices).
+ # Linear interpolation of positions is straightforward, but orientation interpolation
+ # should use SLERP (spherical linear interpolation) for quaternions.
+ # This is commonly needed in robotics, animation, and path planning.
pass
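
As a sketch of the interpolation scheme this TODO describes (linear interpolation
of positions plus SLERP for orientations), the following snippet uses pyquaternion;
the helper name and signature are illustrative, not part of this patch:

    import numpy as np
    import pyquaternion

    def interpolate_se3(p0, q0, p1, q1, t: float):
        """Interpolate two SE3 poses at fraction t in [0, 1]."""
        # Positions interpolate linearly in R^3.
        position = (1.0 - t) * np.asarray(p0) + t * np.asarray(p1)
        # Orientations interpolate on SO(3) via spherical linear interpolation.
        orientation = pyquaternion.Quaternion.slerp(q0, q1, amount=t)
        return position, orientation

    q0 = pyquaternion.Quaternion(axis=[0, 0, 1], angle=0.0)
    q1 = pyquaternion.Quaternion(axis=[0, 0, 1], angle=np.pi / 2)
    pos, rot = interpolate_se3(np.zeros(3), q0, [2.0, 0.0, 1.0], q1, t=0.5)
    # pos == [1.0, 0.0, 0.5]; rot is a 45-degree yaw rotation
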
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 8abd908a..9c73e341 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -13,24 +13,31 @@
@dataclass
class StateSE2:
- """Class to represents 2D points."""
+ """Class to represents a 2D pose as SE2 (x, y, yaw)."""
- x: float # [m] location
- y: float # [m] location
- yaw: float # [m] location
+ x: float # [m] x-location
+ y: float # [m] y-location
+ yaw: float # [rad] yaw/heading
__slots__ = "x", "y", "yaw"
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE2:
+ """Constructs a StateSE2 from a numpy array.
+
+ :param array: Array of shape (3,) representing the state [x, y, yaw], indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE2Index`.
+ :return: A StateSE2 instance.
+ """
assert array.ndim == 1
assert array.shape[0] == len(StateSE2Index)
return StateSE2(array[StateSE2Index.X], array[StateSE2Index.Y], array[StateSE2Index.YAW])
@property
def array(self) -> npt.NDArray[np.float64]:
- """
- Convert vector to array
- :return: array containing [x, y]
+ """Converts the StateSE2 instance to a numpy array, indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE2Index`.
+
+ :return: A numpy array of shape (3,) containing the state [x, y, yaw].
"""
array = np.zeros(len(StateSE2Index), dtype=np.float64)
array[StateSE2Index.X] = self.x
@@ -39,10 +46,18 @@ def array(self) -> npt.NDArray[np.float64]:
return array
@property
- def point_2d(self) -> Point2D:
+ def state_se2(self) -> StateSE2:
+ """The 2D pose itself. Helpful for polymorphism.
+
+ :return: A StateSE2 instance representing the 2D pose.
"""
- Convert SE2 state to 2D point (drops heading)
- :return: Point2D dataclass
+ return self
+
+ @property
+ def point_2d(self) -> Point2D:
+ """The 2D projection of the 2D pose.
+
+ :return: A Point2D instance representing the 2D projection of the 2D pose.
"""
return Point2D(self.x, self.y)
@@ -51,30 +66,37 @@ def shapely_point(self) -> geom.Point:
return geom.Point(self.x, self.y)
def __iter__(self) -> Iterable[float]:
- """
- :return: iterator of tuples (x, y)
- """
- return iter((self.x, self.y))
+ """Iterator over the state coordinates (x, y, yaw)."""
+ return iter((self.x, self.y, self.yaw))
def __hash__(self) -> int:
"""Hash method"""
- return hash((self.x, self.y))
+ return hash((self.x, self.y, self.yaw))
@dataclass
class StateSE3:
- """Class to represents 2D points."""
-
- x: float # [m] location
- y: float # [m] location
- z: float # [m] location
- roll: float
- pitch: float
- yaw: float
+ """
+ Class to represent a 3D pose as SE3 (x, y, z, roll, pitch, yaw).
+ TODO: Use quaternions for rotation representation.
+ """
+
+ x: float # [m] x-location
+ y: float # [m] y-location
+ z: float # [m] z-location
+ roll: float # [rad] roll
+ pitch: float # [rad] pitch
+ yaw: float # [rad] yaw
__slots__ = "x", "y", "z", "roll", "pitch", "yaw"
@classmethod
def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3:
+ """Constructs a StateSE3 from a numpy array.
+
+ :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \
+ :class:`~d123.geometry.StateSE3Index`.
+ :return: A StateSE3 instance.
+ """
assert array.ndim == 1
assert array.shape[0] == len(StateSE3Index)
return StateSE3(
@@ -87,7 +109,12 @@ def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3:
)
@classmethod
- def from_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
+ def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
+ """Constructs a StateSE3 from a 4x4 transformation matrix.
+
+ :param array: A 4x4 numpy array representing the transformation matrix.
+ :return: A StateSE3 instance.
+ """
assert array.ndim == 2
assert array.shape == (4, 4)
translation = array[:3, 3]
@@ -103,6 +130,10 @@ def from_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
@property
def array(self) -> npt.NDArray[np.float64]:
+ """Converts the StateSE3 instance to a numpy array, indexed by StateSE3Index.
+
+ :return: A numpy array of shape (6,) containing the state [x, y, z, roll, pitch, yaw].
+ """
array = np.zeros(len(StateSE3Index), dtype=np.float64)
array[StateSE3Index.X] = self.x
array[StateSE3Index.Y] = self.y
@@ -112,28 +143,90 @@ def array(self) -> npt.NDArray[np.float64]:
array[StateSE3Index.YAW] = self.yaw
return array
- # @property
- # def matrix(self) -> npt.NDArray[np.float64]:
- # """Convert SE3 state to 4x4 transformation matrix."""
- # R = get_rotation_matrix(self)
- # translation = np.array([self.x, self.y, self.z], dtype=np.float64)
- # matrix = np.eye(4, dtype=np.float64)
- # matrix[:3, :3] = R
- # matrix[:3, 3] = translation
- # return matrix
-
@property
def state_se2(self) -> StateSE2:
+ """Returns the 3D state as a 2D state by ignoring the z-axis.
+
+ :return: A StateSE2 instance representing the 2D projection of the 3D state.
+ """
return StateSE2(self.x, self.y, self.yaw)
@property
def point_3d(self) -> Point3D:
+ """Returns the 3D point representation of the state.
+
+ :return: A Point3D instance representing the 3D point.
+ """
return Point3D(self.x, self.y, self.z)
@property
def point_2d(self) -> Point2D:
+ """Returns the 2D point representation of the state.
+
+ :return: A Point2D instance representing the 2D point.
+ """
return Point2D(self.x, self.y)
@property
def shapely_point(self) -> geom.Point:
- return geom.Point(self.x, self.y, self.z)
+ """Returns the Shapely point representation of the state.
+
+ :return: A Shapely Point instance representing the 3D point.
+ """
+ return self.point_3d.shapely_point
+
+ @property
+ def transformation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 4x4 transformation matrix representation of the state.
+
+ :return: A 4x4 numpy array representing the transformation matrix.
+ """
+ raise NotImplementedError("Transformation matrix conversion not implemented yet.")
+
+ @property
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 rotation matrix representation of the state's orientation.
+
+ :return: A 3x3 numpy array representing the rotation matrix.
+ """
+ raise NotImplementedError("Rotation matrix conversion not implemented yet.")
+
+ @property
+ def quaternion(self) -> npt.NDArray[np.float64]:
+ """Returns the quaternion (w, x, y, z) representation of the state's orientation.
+
+ :return: A numpy array of shape (4,) representing the quaternion.
+ """
+ raise NotImplementedError("Quaternion conversion not implemented yet.")
+
+ def __iter__(self) -> Iterable[float]:
+ """Iterator over the state coordinates (x, y, z, roll, pitch, yaw)."""
+ return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
+
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
+
+ def __matmul__(self, other: StateSE3) -> StateSE3:
+ """Combines two SE3 states by applying the transformation of the other state to this state.
+
+ :param other: Another StateSE3 instance representing the transformation to apply.
+ :return: A new StateSE3 instance representing the combined transformation.
+ """
+ return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
+
+
+@dataclass
+class QuaternionSE3:
+ """Class representing a quaternion in SE3 space.
+
+ TODO: Implement and replace StateSE3.
+ """
+
+ x: float
+ y: float
+ z: float
+ qw: float
+ qx: float
+ qy: float
+ qz: float
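
Since `transformation_matrix` is still a stub above, here is a minimal sketch of
what it could compute, assuming the intrinsic Z-Y'-X'' convention used elsewhere
in this series; the helper names are illustrative only. With a homogeneous 4x4
matrix, `__matmul__` composes poses by plain matrix multiplication:

    import numpy as np

    def euler_zyx_rotation(roll: float, pitch: float, yaw: float) -> np.ndarray:
        """Intrinsic Z-Y'-X'' rotation: R = Rz(yaw) @ Ry(pitch) @ Rx(roll)."""
        cr, sr = np.cos(roll), np.sin(roll)
        cp, sp = np.cos(pitch), np.sin(pitch)
        cy, sy = np.cos(yaw), np.sin(yaw)
        Rz = np.array([[cy, -sy, 0.0], [sy, cy, 0.0], [0.0, 0.0, 1.0]])
        Ry = np.array([[cp, 0.0, sp], [0.0, 1.0, 0.0], [-sp, 0.0, cp]])
        Rx = np.array([[1.0, 0.0, 0.0], [0.0, cr, -sr], [0.0, sr, cr]])
        return Rz @ Ry @ Rx

    def pose_to_matrix(x, y, z, roll, pitch, yaw) -> np.ndarray:
        """Homogeneous transform: rotation upper-left, translation last column."""
        matrix = np.eye(4, dtype=np.float64)
        matrix[:3, :3] = euler_zyx_rotation(roll, pitch, yaw)
        matrix[:3, 3] = (x, y, z)
        return matrix

    # Composing two poses reduces to one matrix product, as __matmul__ intends:
    combined = pose_to_matrix(1, 0, 0, 0, 0, np.pi / 2) @ pose_to_matrix(1, 0, 0, 0, 0, 0)
    # combined places the second pose at (1, 1, 0) with a 90-degree yaw
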
diff --git a/d123/dataset/dataset_specific/av2/.gitkeep b/d123/geometry/torch/.gitkeep
similarity index 100%
rename from d123/dataset/dataset_specific/av2/.gitkeep
rename to d123/geometry/torch/.gitkeep
diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py
index 6b4219d0..02be57df 100644
--- a/d123/geometry/transform/se3.py
+++ b/d123/geometry/transform/se3.py
@@ -3,33 +3,6 @@
from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D
-# def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
-# R_x = np.array(
-# [
-# [1, 0, 0],
-# [0, np.cos(state_se3.roll), -np.sin(state_se3.roll)],
-# [0, np.sin(state_se3.roll), np.cos(state_se3.roll)],
-# ],
-# dtype=np.float64,
-# )
-# R_y = np.array(
-# [
-# [np.cos(state_se3.pitch), 0, np.sin(state_se3.pitch)],
-# [0, 1, 0],
-# [-np.sin(state_se3.pitch), 0, np.cos(state_se3.pitch)],
-# ],
-# dtype=np.float64,
-# )
-# R_z = np.array(
-# [
-# [np.cos(state_se3.yaw), -np.sin(state_se3.yaw), 0],
-# [np.sin(state_se3.yaw), np.cos(state_se3.yaw), 0],
-# [0, 0, 1],
-# ],
-# dtype=np.float64,
-# )
-# return R_z @ R_y @ R_x
-
def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
# Intrinsic Z-Y'-X'' rotation: R = R_z(yaw) @ R_y(pitch) @ R_x(roll)
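
The stated convention can be sanity-checked numerically; SciPy is used here purely
for illustration and is not a dependency of this patch:

    import numpy as np
    from scipy.spatial.transform import Rotation

    roll, pitch, yaw = 0.1, -0.2, 0.3
    cr, sr = np.cos(roll), np.sin(roll)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cy, sy = np.cos(yaw), np.sin(yaw)
    Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
    Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
    Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])

    # Upper-case axes in SciPy denote intrinsic rotations.
    expected = Rotation.from_euler("ZYX", [yaw, pitch, roll]).as_matrix()
    assert np.allclose(Rz @ Ry @ Rx, expected)
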
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index b1ce46b2..a9c40077 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -2,7 +2,12 @@
import numpy.typing as npt
import shapely
-from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex
+from d123.geometry.geometry_index import (
+ BoundingBoxSE2Index,
+ BoundingBoxSE3Index,
+ Corners2DIndex,
+ Point2DIndex,
+)
def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
@@ -36,13 +41,13 @@ def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[
return corners_array.squeeze(axis=0) if ndim_one else corners_array
-def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+def corners_2d_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
polygons = shapely.creation.polygons(corners_array)
return polygons
def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
+ return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
def translate_along_yaw_array(
@@ -51,6 +56,7 @@ def translate_along_yaw_array(
lon: npt.NDArray[np.float64],
lat: npt.NDArray[np.float64],
) -> npt.NDArray[np.float64]:
+ # TODO: move somewhere else
assert points_2d.shape[-1] == len(Point2DIndex)
half_pi = np.pi / 2.0
translation: npt.NDArray[np.float64] = np.stack(
@@ -61,3 +67,59 @@ def translate_along_yaw_array(
axis=-1,
)
return points_2d + translation
+
+
+def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ """Converts an array of BoundingBoxSE3 objects to a coordinates array.
+ TODO: Fix this function
+
+ :param bbse3_array: Array of BoundingBoxSE3 objects, shape (..., 7) [x, y, z, yaw, pitch, roll, length, width, height].
+ :return: Coordinates array of shape (..., 8, 3) where 8 is the number of corners.
+ """
+ assert bbse3_array.shape[-1] == len(BoundingBoxSE3Index)
+
+ ndim_one: bool = bbse3_array.ndim == 1
+ if ndim_one:
+ bbse3_array = bbse3_array[None, :]
+
+ # Extract parameters
+ centers = bbse3_array[..., BoundingBoxSE3Index.XYZ] # (..., 3)
+ yaws = bbse3_array[..., BoundingBoxSE3Index.YAW] # (...,)
+ pitches = bbse3_array[..., BoundingBoxSE3Index.PITCH] # (...,)
+ rolls = bbse3_array[..., BoundingBoxSE3Index.ROLL] # (...,)
+
+ # Corner factors: (x, y, z) in box frame
+ factors = np.array(
+ [
+ [+0.5, -0.5, -0.5], # FRONT_LEFT_BOTTOM
+ [+0.5, +0.5, -0.5], # FRONT_RIGHT_BOTTOM
+ [-0.5, +0.5, -0.5], # BACK_RIGHT_BOTTOM
+ [-0.5, -0.5, -0.5], # BACK_LEFT_BOTTOM
+ [+0.5, -0.5, +0.5], # FRONT_LEFT_TOP
+ [+0.5, +0.5, +0.5], # FRONT_RIGHT_TOP
+ [-0.5, +0.5, +0.5], # BACK_RIGHT_TOP
+ [-0.5, -0.5, +0.5], # BACK_LEFT_TOP
+ ],
+ dtype=np.float64,
+ ) # (8, 3)
+
+ # Box extents
+ extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT] # (..., 3)
+ corners_local = factors * extents[..., None, :] # (..., 8, 3)
+
+ # Rotation matrices (yaw, pitch, roll)
+ def rotation_matrix(yaw, pitch, roll):
+ cy, sy = np.cos(yaw), np.sin(yaw)
+ cp, sp = np.cos(pitch), np.sin(pitch)
+ cr, sr = np.cos(roll), np.sin(roll)
+ Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
+ Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
+ Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
+ return Rz @ Ry @ Rx
+
+ corners_world = np.empty((*bbse3_array.shape[:-1], 8, 3), dtype=np.float64)
+ for idx in np.ndindex(bbse3_array.shape[:-1]):
+ R = rotation_matrix(yaws[idx], pitches[idx], rolls[idx])
+ corners_world[idx] = centers[idx] + (corners_local[idx] @ R.T)
+
+ return corners_world.squeeze(axis=0) if ndim_one else corners_world
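
Assuming `BoundingBoxSE3Index` orders its fields as (x, y, z, roll, pitch, yaw,
length, width, height), a quick unrotated-box check of the helper looks like this
(illustrative, not part of the test suite):

    import numpy as np

    # One axis-aligned box: 4 m x 2 m x 1.5 m, centred at (10, 5, 0.75).
    bbse3 = np.array([10.0, 5.0, 0.75, 0.0, 0.0, 0.0, 4.0, 2.0, 1.5])
    corners = bbse3_array_to_corners_array(bbse3)
    assert corners.shape == (8, 3)
    # FRONT_LEFT_BOTTOM is the centre plus 0.5 * (+length, -width, -height):
    assert np.allclose(corners[0], [12.0, 4.0, 0.0])
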
diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py
index a3b8631d..3ab2ef51 100644
--- a/d123/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -1,57 +1,215 @@
from __future__ import annotations
+from dataclasses import dataclass
+from typing import Iterable
+
import numpy as np
import numpy.typing as npt
-from d123.geometry.point import Point2D, Point3D, Point3DIndex
+from d123.geometry.geometry_index import Vector2DIndex, Vector3DIndex
+
+
+@dataclass
+class Vector2D:
+ """
+ Class to represent 2D vectors with x and y components.
+
+ Example:
+ >>> v1 = Vector2D(3.0, 4.0)
+ >>> v2 = Vector2D(1.0, 2.0)
+ >>> v3 = v1 + v2
+ >>> v3
+ Vector2D(x=4.0, y=6.0)
+ >>> v1.array
+ array([3., 4.])
+ >>> v1.magnitude
+ 5.0
+ """
+
+ x: float # [m] x-component of the vector
+ y: float # [m] y-component of the vector
+ __slots__ = "x", "y"
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64]) -> Vector2D:
+ """Constructs a Vector2D from a numpy array.
+
+ :param array: Array of shape (2,) representing the vector components [x, y], indexed by \
+ :class:`~d123.geometry.Vector2DIndex`.
+ :return: A Vector2D instance.
+ """
+ assert array.ndim == 1
+ assert array.shape[0] == len(Vector2DIndex)
+ return Vector2D(array[Vector2DIndex.X], array[Vector2DIndex.Y])
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """The array representation of the 2D vector.
+
+ :return: A numpy array of shape (2,) containing the vector components [x, y], indexed by \
+ :class:`~d123.geometry.Vector2DIndex`.
+ """
+ array = np.zeros(len(Vector2DIndex), dtype=np.float64)
+ array[Vector2DIndex.X] = self.x
+ array[Vector2DIndex.Y] = self.y
+ return array
+
+ @property
+ def magnitude(self) -> float:
+ """Calculates the magnitude (length) of the 2D vector.
+
+ :return: The magnitude of the vector.
+ """
+ return float(np.linalg.norm(self.array))
+
+ @property
+ def vector_2d(self) -> Vector2D:
+ """The 2D vector itself. Handy for polymorphism.
+
+ :return: A Vector2D instance representing the 2D vector.
+ """
+ return self
-class Vector2D(Point2D):
def __add__(self, other: Vector2D) -> Vector2D:
+ """Adds two 2D vectors.
+
+ :param other: The other vector to add.
+ :return: A new Vector2D instance representing the sum.
+ """
return Vector2D(self.x + other.x, self.y + other.y)
def __sub__(self, other: Vector2D) -> Vector2D:
+ """Subtracts two 2D vectors.
+
+ :param other: The other vector to subtract.
+ :return: A new Vector2D instance representing the difference.
+ """
return Vector2D(self.x - other.x, self.y - other.y)
def __mul__(self, scalar: float) -> Vector2D:
+ """Multiplies the 2D vector by a scalar.
+
+ :param scalar: The scalar value to multiply with.
+ :return: A new Vector2D instance representing the scaled vector.
+ """
return Vector2D(self.x * scalar, self.y * scalar)
def __truediv__(self, scalar: float) -> Vector2D:
+ """Divides the 2D vector by a scalar.
+
+ :param scalar: The scalar value to divide by.
+ :return: A new Vector2D instance representing the divided vector.
+ """
return Vector2D(self.x / scalar, self.y / scalar)
+ def __iter__(self) -> Iterable[float]:
+ """Iterator over vector components."""
+ return iter((self.x, self.y))
+
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y))
+
+
+@dataclass
+class Vector3D:
+ """
+ Class to represent 3D vectors with x, y, and z components.
+
+ Example:
+ >>> v1 = Vector3D(1.0, 2.0, 3.0)
+ >>> v2 = Vector3D(4.0, 5.0, 6.0)
+ >>> v3 = v1 + v2
+ >>> v3
+ Vector3D(x=5.0, y=7.0, z=9.0)
+ >>> v1.array
+ array([1., 2., 3.])
+ >>> v1.magnitude
+ 3.7416573867739413
+ """
+
+ x: float # [m] x-component of the vector
+ y: float # [m] y-component of the vector
+ z: float # [m] z-component of the vector
+ __slots__ = "x", "y", "z"
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D:
+ """Constructs a Vector3D from a numpy array.
+
+ :param array: Array of shape (3,), indexed by :class:`~d123.geometry.geometry_index.Vector3DIndex`.
+ :return: A Vector3D instance.
+ """
+ assert array.ndim == 1
+ assert array.shape[0] == len(Vector3DIndex)
+ return Vector3D(array[Vector3DIndex.X], array[Vector3DIndex.Y], array[Vector3DIndex.Z])
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """
+ Returns the vector components as a numpy array
+
+ :return: A numpy array representing the vector components [x, y, z], indexed by \
+ :class:`~d123.geometry.geometry_index.Vector3DIndex`.
+ """
+ array = np.zeros(len(Vector3DIndex), dtype=np.float64)
+ array[Vector3DIndex.X] = self.x
+ array[Vector3DIndex.Y] = self.y
+ array[Vector3DIndex.Z] = self.z
+ return array
+
+ @property
def magnitude(self) -> float:
- """Calculate the magnitude of the vector."""
+ """Calculates the magnitude (length) of the 3D vector.
+
+ :return: The magnitude of the vector.
+ """
return float(np.linalg.norm(self.array))
@property
def vector_2d(self) -> Vector2D:
- return self
+ """Returns the 2D vector projection (x, y) of the 3D vector.
-
-class Vector3D(Point3D):
-
- @classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D:
- assert array.ndim == 1
- assert array.shape[0] == len(Point3DIndex)
- return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
+ :return: A Vector2D instance representing the 2D projection.
+ """
+ return Vector2D(self.x, self.y)
def __add__(self, other: Vector3D) -> Vector3D:
+ """Adds two 3D vectors.
+
+ :param other: The other vector to add.
+ :return: A new Vector3D instance representing the sum.
+ """
return Vector3D(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: Vector3D) -> Vector3D:
+ """Subtracts two 3D vectors.
+
+ :param other: The other vector to subtract.
+ :return: A new Vector3D instance representing the difference.
+ """
return Vector3D(self.x - other.x, self.y - other.y, self.z - other.z)
def __mul__(self, scalar: float) -> Vector3D:
+ """Multiplies the 2D vector by a scalar.
+
+ :param scalar: The scalar value to multiply with.
+ :return: A new Vector3D instance representing the scaled vector.
+ """
return Vector3D(self.x * scalar, self.y * scalar, self.z * scalar)
def __truediv__(self, scalar: float) -> Vector3D:
+ """Divides the 2D vector by a scalar.
+
+ :param scalar: The scalar value to divide by.
+ :return: A new Vector3D instance representing the divided vector.
+ """
return Vector3D(self.x / scalar, self.y / scalar, self.z / scalar)
- def magnitude(self) -> float:
- """Calculate the magnitude of the vector."""
- return float(np.linalg.norm(self.array))
+ def __iter__(self) -> Iterable[float]:
+ """Iterator over vector components."""
+ return iter((self.x, self.y, self.z))
- @property
- def vector_2d(self) -> Vector2D:
- return Vector2D(self.x, self.y)
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y, self.z))
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index 7d1a58d0..5201e768 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -49,7 +49,7 @@ def step(self, non_target_agents: List[BoxDetection]):
time_delta_s = self._timestep_s * self._current_iteration
current_target_agents = []
for initial_agent in self._initial_target_agents:
- speed: float = float(initial_agent.velocity.vector_2d.magnitude())
+ speed: float = float(initial_agent.velocity.vector_2d.magnitude)
propagated_center = translate_along_yaw(initial_agent.center, Point2D(speed * time_delta_s, 0.0))
propagated_bounding_box = BoundingBoxSE2(
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 072773e1..ef8437cf 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -91,7 +91,7 @@ def reset(
self._agent_paths_buffer[agent.metadata.track_token] = polyline_se2.linestring.buffer(
agent.bounding_box_se2.width / 2, cap_style=CAP_STYLE.square
)
- self._agent_initial_vel[agent.metadata.track_token] = float(agent.velocity.vector_2d.magnitude())
+ self._agent_initial_vel[agent.metadata.track_token] = float(agent.velocity.vector_2d.magnitude)
self._past_target_agents = self._initial_target_agents
return self._initial_target_agents
@@ -105,7 +105,7 @@ def step(self, non_target_agents: List[BoxDetection]):
# time_delta_s = self._timestep_s * self._current_iteration
current_target_agents = []
for past_agent in self._past_target_agents:
- agent_velocity: float = float(past_agent.velocity.vector_2d.magnitude())
+ agent_velocity: float = float(past_agent.velocity.vector_2d.magnitude)
agent_path = self._agent_paths[past_agent.metadata.track_token]
agent_path_buffer = self._agent_paths_buffer[past_agent.metadata.track_token]
@@ -133,7 +133,7 @@ def step(self, non_target_agents: List[BoxDetection]):
if leading_agent is not None:
distance_to_lead_agent = past_agent.shapely_polygon.distance(leading_agent.shapely_polygon)
- lead_agent_velocity = float(leading_agent.velocity.vector_2d.magnitude())
+ lead_agent_velocity = float(leading_agent.velocity.vector_2d.magnitude)
else:
distance_to_lead_agent = float(
np.clip(agent_path.length - agent_distance_on_path, a_min=0.0, a_max=None)
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index e4d740c3..960486d0 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -72,7 +72,7 @@ def step(self, non_target_agents: List[BoxDetection]):
time_delta_s = self._timestep_s * self._current_iteration
current_target_agents = []
for initial_agent in self._initial_target_agents:
- speed: float = float(initial_agent.velocity.vector_2d.magnitude())
+ speed: float = float(initial_agent.velocity.vector_2d.magnitude)
propagate_distance = speed * time_delta_s
propagated_center = self._agent_paths[initial_agent.metadata.track_token].interpolate(propagate_distance)
diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
index 38d661ac..ae8c1136 100644
--- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
+++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
@@ -318,11 +318,11 @@ def _render_speed_line(
:param agent: Agent object containing the state and velocity.
:param color: Integer value of color
"""
- if box_detection.velocity.magnitude() > self._meter_per_pixel:
+ if box_detection.velocity.magnitude > self._meter_per_pixel:
future = translate_along_yaw(
pose=box_detection.center,
translation=Vector2D(
- x=box_detection.bounding_box_se2.half_length + box_detection.velocity.magnitude(),
+ x=box_detection.bounding_box_se2.half_length + box_detection.velocity.magnitude,
y=0.0,
),
)
@@ -445,7 +445,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache)
polygon: Polygon = self._scale_polygon(vehicle.bounding_box_se2.shapely_polygon, self._vehicle_scaling)
self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE)
vehicles_raster[mask > 0] = self._scale_to_color(
- vehicle.velocity.magnitude(),
+ vehicle.velocity.magnitude,
self._max_vehicle_speed,
)
mask.fill(0)
@@ -460,7 +460,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache)
)
self._render_convex_polygons(mask, box_detection_cache.origin, [polygon], color=MAX_VALUE)
pedestrians_raster[mask > 0] = self._scale_to_color(
- pedestrian.velocity.magnitude(),
+ pedestrian.velocity.magnitude,
self._max_pedestrian_speed,
)
mask.fill(0)
@@ -484,7 +484,7 @@ def _render_detections_from_cache(self, box_detection_cache: BoxDetectionCache)
ego_polygon: Polygon = self._scale_polygon(ego_detection.shapely_polygon, self._vehicle_scaling)
self._render_convex_polygons(mask, box_detection_cache.origin, [ego_polygon], color=MAX_VALUE)
- ego_raster[mask > 0] = self._scale_to_color(ego_detection.velocity.magnitude(), self._max_vehicle_speed)
+ ego_raster[mask > 0] = self._scale_to_color(ego_detection.velocity.magnitude, self._max_vehicle_speed)
mask.fill(0)
return [vehicles_raster, pedestrians_raster, ego_raster]
diff --git a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py b/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py
index 9a5fd2d1..7c3e0883 100644
--- a/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py
+++ b/d123/simulation/gym/environment/reward_builder/components/time_to_collision.py
@@ -84,7 +84,7 @@ def calculate_ttc_v1(simulation_wrapper: SimulationWrapper, resolution: int = 2)
) = simulation_wrapper.current_planner_input.history.current_state
assert isinstance(observation, DetectionsTracks)
tracked_objects = observation.tracked_objects
- ego_speed = ego_state.dynamic_car_state.center_velocity_2d.magnitude()
+ ego_speed = ego_state.dynamic_car_state.center_velocity_2d.magnitude
if len(tracked_objects) == 0 or ego_speed < STOPPED_SPEED_THRESHOLD:
return SUCCESS_TTC
@@ -119,7 +119,7 @@ def _add_object(tracked_object: TrackedObject) -> bool:
if _add_object(agent):
agent_tokens.append(agent.track_token)
agent_coords_list.append(_get_coords_array(agent.box))
- agent_dxy.append(_get_dxy(agent.box.center.heading, agent.velocity.magnitude()))
+ agent_dxy.append(_get_dxy(agent.box.center.heading, agent.velocity.magnitude))
agent_coords_array = np.array(agent_coords_list, dtype=np.float64) # (num_agents, 5, 2)
agent_dxy = np.array(agent_dxy, dtype=np.float64) # (num_agents, 2)
if len(agent_tokens) == 0:
@@ -227,7 +227,7 @@ def _extract_tracks_info_excluding_collided_tracks(
tracks_poses: List[List[float]] = [[*tracked_object.center] for tracked_object in tracked_objects]
tracks_speed: List[float] = [
- tracked_object.velocity.magnitude() if isinstance(tracked_object, Agent) else 0
+ tracked_object.velocity.magnitude if isinstance(tracked_object, Agent) else 0
for tracked_object in tracked_objects
]
tracks_boxes: List[OrientedBox] = [tracked_object.box for tracked_object in tracked_objects]
diff --git a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
index 527d4e71..940b1f47 100644
--- a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
+++ b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
@@ -613,7 +613,7 @@ def _calculate_off_road(
[[point.x, point.y] for point in ego_state.agent.box.all_corners()],
dtype=np.float64,
)
- corner_in_polygons = drivable_area_map.points_in_polygons(ego_corners) # (geom, 4)
+ corner_in_polygons = drivable_area_map.contains_vectorized(ego_corners) # (geom, 4)
polygon_indices = np.where(corner_in_polygons.sum(axis=-1) > 0)[0]
corners_dwithin_polygons = corner_in_polygons.sum(axis=0) > 0
diff --git a/docs/conf.py b/docs/conf.py
index f114d331..b14d3fea 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -75,7 +75,7 @@
"members": True,
"member-order": "bysource",
"undoc-members": True,
- "inherited-members": True,
- "exclude-members": "__init__, __post_init__",
+ "inherited-members": False,
+ "exclude-members": "__init__, __post_init__, __new__",
"imported-members": True,
}
diff --git a/docs/geometry.rst b/docs/geometry.rst
index 263b7db0..957db541 100644
--- a/docs/geometry.rst
+++ b/docs/geometry.rst
@@ -2,12 +2,56 @@
Geometry
========
-.. autoclass:: d123.common.geometry.base.Point2D()
+Geometric Primitives
+--------------------
-.. autoclass:: d123.common.geometry.base.Point3D()
+Points
+~~~~~~
+.. autoclass:: d123.geometry.Point2D()
-.. autoclass:: d123.common.geometry.base.StateSE2()
+.. autoclass:: d123.geometry.Point3D()
-.. autoclass:: d123.common.geometry.base.StateSE3()
+Vectors
+~~~~~~~
+.. autoclass:: d123.geometry.Vector2D()
-.. autoclass:: d123.dataset.maps.abstract_map.AbstractMap()
+.. autoclass:: d123.geometry.Vector3D()
+
+Special Euclidean States
+~~~~~~~~~~~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.StateSE2()
+
+.. autoclass:: d123.geometry.StateSE3()
+
+Bounding Boxes
+~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.BoundingBoxSE2()
+
+.. autoclass:: d123.geometry.BoundingBoxSE3()
+
+Indexing Enums
+~~~~~~~~~~~~~~
+.. autoclass:: d123.geometry.Point2DIndex()
+
+.. autoclass:: d123.geometry.Point3DIndex()
+
+.. autoclass:: d123.geometry.Vector2DIndex()
+
+.. autoclass:: d123.geometry.Vector3DIndex()
+
+.. autoclass:: d123.geometry.StateSE2Index()
+
+.. autoclass:: d123.geometry.StateSE3Index()
+
+.. autoclass:: d123.geometry.BoundingBoxSE2Index()
+
+.. autoclass:: d123.geometry.BoundingBoxSE3Index()
+
+.. autoclass:: d123.geometry.Corners2DIndex()
+
+.. autoclass:: d123.geometry.Corners3DIndex()
+
+
+Occupancy Map
+-------------
+.. autoclass:: d123.geometry.OccupancyMap2D()
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index f8fe8c69..9052f79b 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -20,6 +20,21 @@
"id": "1",
"metadata": {},
"outputs": [],
+ "source": [
+ "from d123.geometry import Point2D\n",
+ "import numpy as np\n",
+ "\n",
+ "import torch\n",
+ "\n",
+ "from d123.geometry.polyline import Polyline2D"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2",
+ "metadata": {},
+ "outputs": [],
"source": [
"# split = \"nuplan_private_test\"\n",
"# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n",
@@ -56,7 +71,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "2",
+ "id": "3",
"metadata": {},
"outputs": [],
"source": [
@@ -175,7 +190,6 @@
" map_object: AbstractLaneGroup\n",
" add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])\n",
"\n",
- "\n",
" if layer in [MapLayer.LANE]:\n",
" add_shapely_linestring_to_ax(ax, map_object.centerline.linestring, CENTERLINE_CONFIG)\n",
"\n",
@@ -236,7 +250,7 @@
" return fig, ax\n",
"\n",
"\n",
- "scene_index = 1\n",
+ "scene_index = 3\n",
"iteration = 99\n",
"fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
"plt.show()\n",
@@ -254,14 +268,10 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "3",
+ "id": "4",
"metadata": {},
"outputs": [],
"source": [
- "scene = scenes[scene_index]\n",
- "\n",
- "\n",
- "scene.get_camera_at_iteration(camera_type=CameraType.CAM_F0, iteration=0)\n",
"\n",
"\n",
"\n"
@@ -270,7 +280,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "4",
+ "id": "5",
"metadata": {},
"outputs": [],
"source": [
@@ -309,7 +319,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "5",
+ "id": "6",
"metadata": {},
"outputs": [],
"source": []
@@ -317,7 +327,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "6",
+ "id": "7",
"metadata": {},
"outputs": [],
"source": []
@@ -325,7 +335,7 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "7",
+ "id": "8",
"metadata": {},
"outputs": [],
"source": []
From 0773efd82c9549ed8d9fe23cc9963a25b91451ee Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 15:27:03 +0200
Subject: [PATCH 016/145] Make most `geometry` types array-based. Add tests for
some `geometry` types.
---
d123/common/utils/mixin.py | 9 +-
d123/geometry/bounding_box.py | 3 +-
d123/geometry/geometry_index.py | 47 +++
d123/geometry/point.py | 111 ++++---
d123/geometry/polyline.py | 38 +--
d123/geometry/rotation.py | 241 +++++++++++++++
d123/geometry/se.py | 360 +++++++++++++++++-----
d123/geometry/test/__init__.py | 0
d123/geometry/test/test_point.py | 203 ++++++++++++
d123/geometry/test/test_polyline.py | 326 ++++++++++++++++++++
d123/geometry/test/test_rotation.py | 240 +++++++++++++++
d123/geometry/test/test_vector.py | 172 +++++++++++
d123/geometry/utils/bounding_box_utils.py | 24 +-
d123/geometry/utils/rotation_utils.py | 51 ++-
d123/geometry/vector.py | 90 ++++--
15 files changed, 1745 insertions(+), 170 deletions(-)
create mode 100644 d123/geometry/rotation.py
create mode 100644 d123/geometry/test/__init__.py
create mode 100644 d123/geometry/test/test_point.py
create mode 100644 d123/geometry/test/test_polyline.py
create mode 100644 d123/geometry/test/test_rotation.py
create mode 100644 d123/geometry/test/test_vector.py
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
index 5e7ecc0e..9290242b 100644
--- a/d123/common/utils/mixin.py
+++ b/d123/common/utils/mixin.py
@@ -1,20 +1,18 @@
from __future__ import annotations
-import abc
import copy as pycopy
-from functools import cached_property
import numpy as np
import numpy.typing as npt
-class ArrayMixin(abc.ABC):
+class ArrayMixin:
- """Abstract base class for geometric entities."""
+ """Mixin base class for array-backed geometric entities."""
- @cached_property
- @abc.abstractmethod
+ @property
def array(self) -> npt.NDArray[np.float64]:
"""The array representation of the geometric entity."""
+ raise NotImplementedError
def __array__(self, dtype: npt.DtypeLike = None, copy: bool = False) -> npt.NDArray:
array = self.array
@@ -34,6 +32,7 @@ def __eq__(self, other) -> bool:
return np.array_equal(self.array, other.array)
return False
+ @property
def shape(self) -> tuple:
"""Return the shape of the array."""
return self.array.shape
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index 6b612c4e..3ea196e3 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -3,6 +3,7 @@
-from ast import Dict
from dataclasses import dataclass
from functools import cached_property
+from typing import Dict, Union
import numpy as np
import numpy.typing as npt
@@ -202,4 +203,4 @@ def corners_dict(self) -> Dict[Corners3DIndex, Point3D]:
return {index: Point3D.from_array(corners_array[index]) for index in Corners3DIndex}
-BoundingBox = BoundingBoxSE2 | BoundingBoxSE3
+BoundingBox = Union[BoundingBoxSE2, BoundingBoxSE3]
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index f77951c2..3f658838 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -71,6 +71,27 @@ class Vector3DIndex(IntEnum):
Z = 2
+class EulerAnglesIndex(IntEnum):
+ """
+ Indexes array-like representations of Euler angles (roll,pitch,yaw).
+ """
+
+ ROLL = 0
+ PITCH = 1
+ YAW = 2
+
+
+class QuaternionIndex(IntEnum):
+ """
+ Indexes array-like representations of quaternions (qw,qx,qy,qz).
+ """
+
+ QW = 0
+ QX = 1
+ QY = 2
+ QZ = 3
+
+
class StateSE3Index(IntEnum):
"""
Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw).
@@ -97,6 +118,32 @@ def ROTATION_XYZ(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
+class QuaternionSE3Index(IntEnum):
+ """
+ Indexes array-like representations of SE3 states with quaternions (x,y,z,qw,qx,qy,qz).
+ """
+
+ X = 0
+ Y = 1
+ Z = 2
+ QW = 3
+ QX = 4
+ QY = 5
+ QZ = 6
+
+ @classproperty
+ def XY(cls) -> slice:
+ return slice(cls.X, cls.Y + 1)
+
+ @classproperty
+ def XYZ(cls) -> slice:
+ return slice(cls.X, cls.Z + 1)
+
+ @classproperty
+ def QUATERNION(cls) -> slice:
+ return slice(cls.QW, cls.QZ + 1)
+
+
class BoundingBoxSE2Index(IntEnum):
"""
Indexes array-like representations of rotated 2D bounding boxes (x,y,yaw,length,width).
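
The payoff of these index enums and their slice helpers is readable array
indexing; a hypothetical use of the new `QuaternionSE3Index`:

    import numpy as np
    from d123.geometry.geometry_index import QuaternionSE3Index

    state = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0])  # x, y, z, qw, qx, qy, qz
    xyz = state[QuaternionSE3Index.XYZ]            # array([1., 2., 3.])
    quat = state[QuaternionSE3Index.QUATERNION]    # array([1., 0., 0., 0.]), identity
    assert xyz.shape == (3,) and quat.shape == (4,)
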
diff --git a/d123/geometry/point.py b/d123/geometry/point.py
index 5641cf17..b3daa3c8 100644
--- a/d123/geometry/point.py
+++ b/d123/geometry/point.py
@@ -1,6 +1,5 @@
from __future__ import annotations
-from dataclasses import dataclass
from functools import cached_property
from typing import Iterable
@@ -12,41 +11,57 @@
from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
-@dataclass
class Point2D(ArrayMixin):
- """Class to represents 2D points.
+ """Class to represents 2D points."""
- :return: A Point2D instance.
- """
+ _array: npt.NDArray[np.float64]
- x: float # [m] location
- y: float # [m] location
- __slots__ = "x", "y"
+ def __init__(self, x: float, y: float):
+ """Initialize StateSE2 with x, y, yaw coordinates."""
+ array = np.zeros(len(Point2DIndex), dtype=np.float64)
+ array[Point2DIndex.X] = x
+ array[Point2DIndex.Y] = y
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> Point2D:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point2D:
"""Constructs a Point2D from a numpy array.
:param array: Array of shape (2,) representing the point coordinates [x, y], indexed by \
:class:`~d123.geometry.Point2DIndex`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A Point2D instance.
"""
-
assert array.ndim == 1
assert array.shape[0] == len(Point2DIndex)
- return Point2D(array[Point2DIndex.X], array[Point2DIndex.Y])
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
- @cached_property
+ @property
+ def x(self) -> float:
+ """The x coordinate of the point.
+
+ :return: The x coordinate of the point.
+ """
+ return self._array[Point2DIndex.X]
+
+ @property
+ def y(self) -> float:
+ """The y coordinate of the point.
+
+ :return: The y coordinate of the point.
+ """
+ return self._array[Point2DIndex.Y]
+
+ @property
def array(self) -> npt.NDArray[np.float64]:
"""The array representation of the point.
:return: A numpy array of shape (2,) containing the point coordinates [x, y], indexed by \
:class:`~d123.geometry.Point2DIndex`.
"""
- array = np.zeros(len(Point2DIndex), dtype=np.float64)
- array[Point2DIndex.X] = self.x
- array[Point2DIndex.Y] = self.y
- return array
+ return self._array
@property
def shapely_point(self) -> geom.Point:
@@ -65,40 +80,66 @@ def __hash__(self) -> int:
return hash((self.x, self.y))
-@dataclass
class Point3D(ArrayMixin):
"""Class to represents 3D points."""
- x: float # [m] location
- y: float # [m] location
- z: float # [m] location
- __slots__ = "x", "y", "z"
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float, z: float):
+ """Initialize Point3D with x, y, z coordinates."""
+ array = np.zeros(len(Point3DIndex), dtype=np.float64)
+ array[Point3DIndex.X] = x
+ array[Point3DIndex.Y] = y
+ array[Point3DIndex.Z] = z
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> "Point3D":
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Point3D:
"""Constructs a Point3D from a numpy array.
:param array: Array of shape (3,) representing the point coordinates [x, y, z], indexed by \
:class:`~d123.geometry.Point3DIndex`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A Point3D instance.
"""
- assert array.ndim == 1, f"Array must be 1-dimensional, got shape {array.shape}"
- assert array.shape[0] == len(
- Point3DIndex
- ), f"Array must have the same length as Point3DIndex, got shape {array.shape}"
- return cls(array[Point3DIndex.X], array[Point3DIndex.Y], array[Point3DIndex.Z])
+ assert array.ndim == 1
+ assert array.shape[0] == len(Point3DIndex)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
@cached_property
def array(self) -> npt.NDArray[np.float64]:
- """Converts the Point3D instance to a numpy array, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """The array representation of the point.
- :return: A numpy array of shape (3,) containing the point coordinates [x, y, z].
+ :return: A numpy array of shape (3,) containing the point coordinates [x, y, z], indexed by \
+ :class:`~d123.geometry.Point3DIndex`.
"""
- array = np.zeros(len(Point3DIndex), dtype=np.float64)
- array[Point3DIndex.X] = self.x
- array[Point3DIndex.Y] = self.y
- array[Point3DIndex.Z] = self.z
- return array
+ return self._array
+
+ @property
+ def x(self) -> float:
+ """The x coordinate of the point.
+
+ :return: The x coordinate of the point.
+ """
+ return self._array[Point3DIndex.X]
+
+ @property
+ def y(self) -> float:
+ """The y coordinate of the point.
+
+ :return: The y coordinate of the point.
+ """
+ return self._array[Point3DIndex.Y]
+
+ @property
+ def z(self) -> float:
+ """The z coordinate of the point.
+
+ :return: The z coordinate of the point.
+ """
+ return self._array[Point3DIndex.Z]
@property
def point_2d(self) -> Point2D:
@@ -106,7 +147,7 @@ def point_2d(self) -> Point2D:
:return: A Point2D instance representing the 2D projection of the 3D point.
"""
- return Point2D(self.x, self.y)
+ return Point2D.from_array(self.array[Point3DIndex.XY], copy=False)
@property
def shapely_point(self) -> geom.Point:
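
The `copy` flag threaded through `from_array` trades safety for speed: with
`copy=False` the instance aliases the caller's buffer, which is exactly what the
`point_2d` property above exploits. A minimal sketch of the two behaviours:

    import numpy as np
    from d123.geometry.point import Point2D

    buffer = np.array([1.0, 2.0])
    p_view = Point2D.from_array(buffer, copy=False)
    buffer[0] = 9.0
    assert p_view.x == 9.0  # the view reflects mutation of the shared buffer

    p_copy = Point2D.from_array(buffer)  # default copy=True
    buffer[1] = -1.0
    assert p_copy.y == 2.0  # the copy is insulated from later mutation
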
diff --git a/d123/geometry/polyline.py b/d123/geometry/polyline.py
index 3fee304d..51df68b5 100644
--- a/d123/geometry/polyline.py
+++ b/d123/geometry/polyline.py
@@ -105,14 +105,13 @@ def interpolate(
:param distances: The distances at which to interpolate the polyline.
:return: The interpolated point(s) on the polyline.
"""
- distances_ = distances * self.length if normalized else distances
if isinstance(distances, float) or isinstance(distances, int):
- point = self.linestring.interpolate(distances_, normalized=normalized)
+ point = self.linestring.interpolate(distances, normalized=normalized)
return Point2D(point.x, point.y)
else:
- distances = np.asarray(distances_, dtype=np.float64)
- points = self.linestring.interpolate(distances_, normalized=normalized)
+            distances_ = np.asarray(distances, dtype=np.float64)
+            points = self.linestring.interpolate(distances_, normalized=normalized)
return np.array([[p.x, p.y] for p in points], dtype=np.float64)
def project(
@@ -126,12 +125,10 @@ def project(
:param normalized: Whether to return the normalized distance, defaults to False.
:return: The distance along the polyline to the closest point.
"""
- if isinstance(point, Point2D):
- point_ = point.array
- elif isinstance(point, StateSE2):
- point_ = point.array[StateSE2Index.XY]
+ if isinstance(point, Point2D) or isinstance(point, StateSE2):
+ point_ = point.shapely_point
elif isinstance(point, geom.Point):
- point_ = np.array(point.coords[0], dtype=np.float64)
+ point_ = point
else:
point_ = np.array(point, dtype=np.float64)
return self.linestring.project(point_, normalized=normalized)
@@ -241,12 +238,10 @@ def project(
:param normalized: Whether to return the normalized distance, defaults to False.
:return: The distance along the polyline to the closest point.
"""
- if isinstance(point, Point2D):
- point_ = point.array
- elif isinstance(point, StateSE2):
- point_ = point.array[StateSE2Index.XY]
+ if isinstance(point, Point2D) or isinstance(point, StateSE2):
+ point_ = point.shapely_point
elif isinstance(point, geom.Point):
- point_ = np.array(point.coords[0], dtype=np.float64)
+ point_ = point
else:
point_ = np.array(point, dtype=np.float64)
return self.linestring.project(point_, normalized=normalized)
@@ -327,14 +322,13 @@ def interpolate(
:param normalized: Whether to interpret the distances as fractions of the length.
:return: A Point3D instance or a numpy array of shape (N, 3) representing the interpolated points.
"""
- distances = distances * self.length if normalized else distances
if isinstance(distances, float) or isinstance(distances, int):
- point = self.linestring.interpolate(distances)
+ point = self.linestring.interpolate(distances, normalized=normalized)
return Point3D(point.x, point.y, point.z)
else:
distances = np.asarray(distances, dtype=np.float64)
- points = self.linestring.interpolate(distances)
+ points = self.linestring.interpolate(distances, normalized=normalized)
return np.array([[p.x, p.y, p.z] for p in points], dtype=np.float64)
def project(
@@ -348,14 +342,10 @@ def project(
:param normalized: Whether to return normalized distances, defaults to False.
:return: The distance along the polyline to the closest point.
"""
- if isinstance(point, Point2D):
- point_ = point.array
- elif isinstance(point, StateSE2):
- point_ = point.array[StateSE2Index.XY]
- elif isinstance(point, Point3D):
- point_ = point.array[Point3DIndex.XYZ]
+ if isinstance(point, Point2D) or isinstance(point, StateSE2) or isinstance(point, Point3D):
+ point_ = point.shapely_point
elif isinstance(point, geom.Point):
- point_ = np.array(point.coords[0], dtype=np.float64)
+ point_ = point
else:
point_ = np.array(point, dtype=np.float64)
return self.linestring.project(point_, normalized=normalized)
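
`project` and `interpolate` are typically paired to snap a query point onto a
path; a small sketch with `PolylineSE2`, assuming `Point2D` is re-exported from
`d123.geometry`:

    import numpy as np
    from d123.geometry import Point2D
    from d123.geometry.polyline import PolylineSE2

    centerline = PolylineSE2.from_array(np.array([[0.0, 0.0], [10.0, 0.0]]))
    distance = centerline.project(Point2D(4.0, 3.0))  # arc length of the closest point
    nearest = centerline.interpolate(distance)        # pose on the path at that distance
    assert np.isclose(distance, 4.0)
    assert np.isclose(nearest.x, 4.0) and np.isclose(nearest.y, 0.0)
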
diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py
new file mode 100644
index 00000000..f98a601d
--- /dev/null
+++ b/d123/geometry/rotation.py
@@ -0,0 +1,241 @@
+from __future__ import annotations
+
+from functools import cached_property
+
+import numpy as np
+import numpy.typing as npt
+import pyquaternion
+
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
+from d123.geometry.utils.rotation_utils import get_rotation_matrix_from_euler_array
+
+
+class EulerAngles(ArrayMixin):
+ """Class to represent 3D rotation using Euler angles (roll, pitch, yaw) in radians.
+ NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+ See https://en.wikipedia.org/wiki/Euler_angles for more details.
+ """
+
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, roll: float, pitch: float, yaw: float):
+ """Initialize EulerAngles with roll, pitch, yaw coordinates."""
+ array = np.zeros(len(EulerAnglesIndex), dtype=np.float64)
+ array[EulerAnglesIndex.ROLL] = roll
+ array[EulerAnglesIndex.PITCH] = pitch
+ array[EulerAnglesIndex.YAW] = yaw
+ object.__setattr__(self, "_array", array)
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerAngles:
+ """Constructs a EulerAngles from a numpy array.
+
+ :param array: Array of shape (3,) representing the euler angles [roll, pitch, yaw], indexed by \
+ :class:`~d123.geometry.EulerAnglesIndex`.
+ :param copy: Whether to copy the input array. Defaults to True.
+ :return: A EulerAngles instance.
+ """
+ assert array.ndim == 1
+ assert array.shape[0] == len(EulerAnglesIndex)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @classmethod
+ def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> EulerAngles:
+ """Constructs a EulerAngles from a 3x3 rotation matrix.
+ NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+
+ :param rotation_matrix: A 3x3 numpy array representing the rotation matrix.
+ :return: A EulerAngles instance.
+ """
+ assert rotation_matrix.ndim == 2
+ assert rotation_matrix.shape == (3, 3)
+ quaternion = pyquaternion.Quaternion(matrix=rotation_matrix)
+ yaw, pitch, roll = quaternion.yaw_pitch_roll
+ return EulerAngles(roll=roll, pitch=pitch, yaw=yaw)
+
+ @property
+ def roll(self) -> float:
+ """The roll (x-axis rotation) angle in radians.
+
+ :return: The roll angle in radians.
+ """
+ return self._array[EulerAnglesIndex.ROLL]
+
+ @property
+ def pitch(self) -> float:
+ """The pitch (y-axis rotation) angle in radians.
+
+ :return: The pitch angle in radians.
+ """
+ return self._array[EulerAnglesIndex.PITCH]
+
+ @property
+ def yaw(self) -> float:
+ """The yaw (z-axis rotation) angle in radians.
+
+ :return: The yaw angle in radians.
+ """
+ return self._array[EulerAnglesIndex.YAW]
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """The array representation of the Euler angles.
+
+ :return: A numpy array of shape (3,) containing the Euler angles [roll, pitch, yaw], indexed by \
+ :class:`~d123.geometry.EulerAnglesIndex`.
+ """
+ return self._array
+
+ @property
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 rotation matrix representation of the Euler angles.
+ NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+
+ :return: A 3x3 numpy array representing the rotation matrix.
+ """
+ return get_rotation_matrix_from_euler_array(self.array)
+
+ def __iter__(self):
+ """Iterator over euler angles."""
+ return iter((self.roll, self.pitch, self.yaw))
+
+ def __hash__(self):
+ """Hash function for euler angles."""
+ return hash((self.roll, self.pitch, self.yaw))
+
+
+class Quaternion(ArrayMixin):
+ """
+ Represents a quaternion for 3D rotations.
+ NOTE: This class uses the pyquaternion library for internal computations.
+ """
+
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, qw: float, qx: float, qy: float, qz: float):
+ """Initialize Quaternion with qw, qx, qy, qz components."""
+ array = np.zeros(len(QuaternionIndex), dtype=np.float64)
+ array[QuaternionIndex.QW] = qw
+ array[QuaternionIndex.QX] = qx
+ array[QuaternionIndex.QY] = qy
+ array[QuaternionIndex.QZ] = qz
+ object.__setattr__(self, "_array", array)
+
+ @classmethod
+ def from_array(cls, arr: npt.NDArray[np.float64], copy: bool = True) -> Quaternion:
+ """Constructs a Quaternion from a numpy array.
+
+ :param arr: A 1D numpy array of shape (4,) containing the quaternion components [qw, qx, qy, qz].
+ :param copy: Whether to copy the array data, defaults to True.
+ :return: A Quaternion instance.
+ """
+ assert arr.ndim == 1
+ assert arr.shape[0] == len(QuaternionIndex)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", arr.copy() if copy else arr)
+ return instance
+
+ @classmethod
+ def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> Quaternion:
+ """Constructs a Quaternion from a 3x3 rotation matrix.
+
+ :param rotation_matrix: A 3x3 numpy array representing the rotation matrix.
+ :return: A Quaternion instance.
+ """
+ assert rotation_matrix.ndim == 2
+ assert rotation_matrix.shape == (3, 3)
+ quaternion = pyquaternion.Quaternion(matrix=rotation_matrix)
+ return Quaternion(qw=quaternion.w, qx=quaternion.x, qy=quaternion.y, qz=quaternion.z)
+
+ @classmethod
+ def from_euler_angles(cls, euler_angles: EulerAngles) -> Quaternion:
+ """Constructs a Quaternion from Euler angles.
+ NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+
+ :param euler_angles: An EulerAngles instance representing the Euler angles.
+ :return: A Quaternion instance.
+ """
+ rotation_matrix = euler_angles.rotation_matrix
+ return Quaternion.from_rotation_matrix(rotation_matrix)
+
+ @property
+ def qw(self) -> float:
+ """The scalar part of the quaternion.
+
+ :return: The qw component.
+ """
+ return self._array[QuaternionIndex.QW]
+
+ @property
+ def qx(self) -> float:
+ """The x component of the quaternion.
+
+ :return: The qx component.
+ """
+ return self._array[QuaternionIndex.QX]
+
+ @property
+ def qy(self) -> float:
+ """The y component of the quaternion.
+
+ :return: The qy component.
+ """
+ return self._array[QuaternionIndex.QY]
+
+ @property
+ def qz(self) -> float:
+ """The z component of the quaternion.
+
+ :return: The qz component.
+ """
+ return self._array[QuaternionIndex.QZ]
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """Converts the Quaternion instance to a numpy array.
+
+ :return: A numpy array of shape (4,) containing the quaternion [qw, qx, qy, qz], indexed by \
+            :class:`~d123.geometry.geometry_index.QuaternionIndex`.
+ """
+ return self._array
+
+ @property
+ def pyquaternion(self) -> pyquaternion.Quaternion:
+ """Returns the pyquaternion.Quaternion representation of the quaternion.
+
+ :return: A pyquaternion.Quaternion representation of the quaternion.
+ """
+ return pyquaternion.Quaternion(array=self.array)
+
+ @cached_property
+ def euler_angles(self) -> EulerAngles:
+ """Returns the Euler angles (roll, pitch, yaw) representation of the quaternion.
+ NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
+
+ :return: An EulerAngles instance representing the Euler angles.
+ """
+ yaw, pitch, roll = self.pyquaternion.yaw_pitch_roll
+ return EulerAngles(roll=roll, pitch=pitch, yaw=yaw)
+
+ @cached_property
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 rotation matrix representation of the quaternion.
+
+ :return: A 3x3 numpy array representing the rotation matrix.
+ """
+ return self.pyquaternion.rotation_matrix
+
+ def __iter__(self):
+ """Iterator over quaternion components."""
+ return iter((self.qw, self.qx, self.qy, self.qz))
+
+ def __hash__(self):
+ """Hash function for quaternion."""
+ return hash((self.qw, self.qx, self.qy, self.qz))
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 9c73e341..b2d8db2c 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -1,49 +1,66 @@
from __future__ import annotations
-from dataclasses import dataclass
from typing import Iterable
import numpy as np
import numpy.typing as npt
import shapely.geometry as geom
+from functools import cached_property
-from d123.geometry.geometry_index import StateSE2Index, StateSE3Index
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, StateSE2Index, StateSE3Index
from d123.geometry.point import Point2D, Point3D
+from d123.geometry.rotation import EulerAngles, Quaternion
-@dataclass
-class StateSE2:
+class StateSE2(ArrayMixin):
"""Class to represents a 2D pose as SE2 (x, y, yaw)."""
- x: float # [m] x-location
- y: float # [m] y-location
- yaw: float # [rad] yaw/heading
- __slots__ = "x", "y", "yaw"
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float, yaw: float):
+ """Initialize StateSE2 with x, y, yaw coordinates."""
+ array = np.zeros(len(StateSE2Index), dtype=np.float64)
+ array[StateSE2Index.X] = x
+ array[StateSE2Index.Y] = y
+ array[StateSE2Index.YAW] = yaw
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE2:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE2:
"""Constructs a StateSE2 from a numpy array.
:param array: Array of shape (3,) representing the state [x, y, yaw], indexed by \
:class:`~d123.geometry.geometry_index.StateSE2Index`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A StateSE2 instance.
"""
assert array.ndim == 1
assert array.shape[0] == len(StateSE2Index)
- return StateSE2(array[StateSE2Index.X], array[StateSE2Index.Y], array[StateSE2Index.YAW])
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @property
+ def x(self) -> float:
+ return self._array[StateSE2Index.X]
+
+ @property
+ def y(self) -> float:
+ return self._array[StateSE2Index.Y]
+
+ @property
+ def yaw(self) -> float:
+ return self._array[StateSE2Index.YAW]
@property
def array(self) -> npt.NDArray[np.float64]:
- """Converts the StateSE2 instance to a numpy array, indexed by \
- :class:`~d123.geometry.geometry_index.StateSE2Index`.
+ """Converts the StateSE2 instance to a numpy array
- :return: A numpy array of shape (3,) containing the state [x, y, yaw].
+ :return: A numpy array of shape (3,) containing the state, indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE2Index`.
"""
- array = np.zeros(len(StateSE2Index), dtype=np.float64)
- array[StateSE2Index.X] = self.x
- array[StateSE2Index.Y] = self.y
- array[StateSE2Index.YAW] = self.yaw
- return array
+ return self._array
@property
def state_se2(self) -> StateSE2:
@@ -59,54 +76,68 @@ def point_2d(self) -> Point2D:
:return: A Point2D instance representing the 2D projection of the 2D pose.
"""
- return Point2D(self.x, self.y)
+ return Point2D.from_array(self.array[StateSE2Index.XY])
@property
- def shapely_point(self) -> geom.Point:
- return geom.Point(self.x, self.y)
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 2x2 rotation matrix representation of the state's orientation.
- def __iter__(self) -> Iterable[float]:
- """Iterator over the state coordinates (x, y, yaw)."""
- return iter((self.x, self.y, self.yaw))
+ :return: A 2x2 numpy array representing the rotation matrix.
+ """
+ cos_yaw = np.cos(self.yaw)
+ sin_yaw = np.sin(self.yaw)
+ return np.array([[cos_yaw, -sin_yaw], [sin_yaw, cos_yaw]], dtype=np.float64)
- def __hash__(self) -> int:
- """Hash method"""
- return hash((self.x, self.y, self.yaw))
+ @property
+ def transformation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 transformation matrix representation of the state.
+
+ :return: A 3x3 numpy array representing the transformation matrix.
+ """
+        matrix = np.eye(3, dtype=np.float64)
+ matrix[:2, :2] = self.rotation_matrix
+ matrix[0, 2] = self.x
+ matrix[1, 2] = self.y
+ return matrix
+
+ @property
+ def shapely_point(self) -> geom.Point:
+ return geom.Point(self.x, self.y)
-@dataclass
-class StateSE3:
+class StateSE3(ArrayMixin):
"""
Class to represent a 3D pose as SE3 (x, y, z, roll, pitch, yaw).
TODO: Use quaternions for rotation representation.
"""
- x: float # [m] x-location
- y: float # [m] y-location
- z: float # [m] z-location
- roll: float # [rad] roll
- pitch: float # [rad] pitch
- yaw: float # [rad] yaw
- __slots__ = "x", "y", "z", "roll", "pitch", "yaw"
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float):
+ """Initialize StateSE3 with x, y, z, roll, pitch, yaw coordinates."""
+ array = np.zeros(len(StateSE3Index), dtype=np.float64)
+ array[StateSE3Index.X] = x
+ array[StateSE3Index.Y] = y
+ array[StateSE3Index.Z] = z
+ array[StateSE3Index.ROLL] = roll
+ array[StateSE3Index.PITCH] = pitch
+ array[StateSE3Index.YAW] = yaw
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> StateSE3:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE3:
"""Constructs a StateSE3 from a numpy array.
:param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \
- :class:`~d123.geometry.StateSE3Index`.
+ :class:`~d123.geometry.geometry_index.StateSE3Index`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A StateSE3 instance.
"""
assert array.ndim == 1
assert array.shape[0] == len(StateSE3Index)
- return StateSE3(
- array[StateSE3Index.X],
- array[StateSE3Index.Y],
- array[StateSE3Index.Z],
- array[StateSE3Index.ROLL],
- array[StateSE3Index.PITCH],
- array[StateSE3Index.YAW],
- )
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
@classmethod
def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
@@ -119,29 +150,73 @@ def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
assert array.shape == (4, 4)
translation = array[:3, 3]
rotation = array[:3, :3]
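+        # EulerAngles is iterable, yielding (roll, pitch, yaw).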
+ roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation)
+
return StateSE3(
- x=translation[0],
- y=translation[1],
- z=translation[2],
- roll=np.arctan2(rotation[2, 1], rotation[2, 2]),
- pitch=np.arctan2(-rotation[2, 0], np.sqrt(rotation[2, 1] ** 2 + rotation[2, 2] ** 2)),
- yaw=np.arctan2(rotation[1, 0], rotation[0, 0]),
+ x=translation[Point3DIndex.X],
+ y=translation[Point3DIndex.Y],
+ z=translation[Point3DIndex.Z],
+ roll=roll,
+ pitch=pitch,
+ yaw=yaw,
)
+ @property
+ def x(self) -> float:
+ """Returns the x-coordinate of the 3D state.
+
+ :return: The x-coordinate.
+ """
+ return self._array[StateSE3Index.X]
+
+ @property
+ def y(self) -> float:
+ """Returns the y-coordinate of the 3D state.
+
+ :return: The y-coordinate.
+ """
+ return self._array[StateSE3Index.Y]
+
+ @property
+ def z(self) -> float:
+ """Returns the z-coordinate of the 3D state.
+
+ :return: The z-coordinate.
+ """
+ return self._array[StateSE3Index.Z]
+
+ @property
+ def roll(self) -> float:
+ """Returns the roll (x-axis rotation) of the 3D state.
+
+ :return: The roll angle.
+ """
+ return self._array[StateSE3Index.ROLL]
+
+ @property
+ def pitch(self) -> float:
+ """Returns the pitch (y-axis rotation) of the 3D state.
+
+ :return: The pitch angle.
+ """
+ return self._array[StateSE3Index.PITCH]
+
+ @property
+ def yaw(self) -> float:
+ """Returns the yaw (z-axis rotation) of the 3D state.
+
+ :return: The yaw angle.
+ """
+ return self._array[StateSE3Index.YAW]
+
@property
def array(self) -> npt.NDArray[np.float64]:
- """Converts the StateSE3 instance to a numpy array, indexed by StateSE3Index.
+ """Returns the StateSE3 instance as a numpy array.
- :return: A numpy array of shape (6,) containing the state [x, y, z, roll, pitch, yaw].
+ :return: A numpy array of shape (6,), indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE3Index`.
"""
- array = np.zeros(len(StateSE3Index), dtype=np.float64)
- array[StateSE3Index.X] = self.x
- array[StateSE3Index.Y] = self.y
- array[StateSE3Index.Z] = self.z
- array[StateSE3Index.ROLL] = self.roll
- array[StateSE3Index.PITCH] = self.pitch
- array[StateSE3Index.YAW] = self.yaw
- return array
+ return self._array
@property
def state_se2(self) -> StateSE2:
@@ -175,14 +250,6 @@ def shapely_point(self) -> geom.Point:
"""
return self.point_3d.shapely_point
- @property
- def transformation_matrix(self) -> npt.NDArray[np.float64]:
- """Returns the 4x4 transformation matrix representation of the state.
-
- :return: A 4x4 numpy array representing the transformation matrix.
- """
- raise NotImplementedError("Transformation matrix conversion not implemented yet.")
-
@property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the state's orientation.
@@ -191,6 +258,18 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""
raise NotImplementedError("Rotation matrix conversion not implemented yet.")
+ @property
+ def transformation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 4x4 transformation matrix representation of the state.
+
+ :return: A 4x4 numpy array representing the transformation matrix.
+ """
+ rotation_matrix = self.rotation_matrix
+ transformation_matrix = np.eye(4, dtype=np.float64)
+ transformation_matrix[:3, :3] = rotation_matrix
+        transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]
+ return transformation_matrix
+
@property
def quaternion(self) -> npt.NDArray[np.float64]:
"""Returns the quaternion (w, x, y, z) representation of the state's orientation.
@@ -216,17 +295,142 @@ def __matmul__(self, other: StateSE3) -> StateSE3:
return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
-@dataclass
class QuaternionSE3:
"""Class representing a quaternion in SE3 space.
TODO: Implement and replace StateSE3.
"""
- x: float
- y: float
- z: float
- qw: float
- qx: float
- qy: float
- qz: float
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float):
+ """Initialize QuaternionSE3 with x, y, z, qw, qx, qy, qz coordinates."""
+ array = np.zeros(len(QuaternionSE3Index), dtype=np.float64)
+ array[QuaternionSE3Index.X] = x
+ array[QuaternionSE3Index.Y] = y
+ array[QuaternionSE3Index.Z] = z
+ array[QuaternionSE3Index.QW] = qw
+ array[QuaternionSE3Index.QX] = qx
+ array[QuaternionSE3Index.QY] = qy
+ array[QuaternionSE3Index.QZ] = qz
+ object.__setattr__(self, "_array", array)
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> QuaternionSE3:
+ """Constructs a QuaternionSE3 from a numpy array.
+
+ :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+ :param copy: Whether to copy the input array. Defaults to True.
+ :return: A QuaternionSE3 instance.
+ """
+ assert array.ndim == 1
+ assert array.shape[0] == len(QuaternionSE3Index)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+    @property
+    def x(self) -> float:
+        """Returns the x-coordinate of the translation.
+
+        :return: The x-coordinate.
+        """
+        return self._array[QuaternionSE3Index.X]
+
+    @property
+    def y(self) -> float:
+        """Returns the y-coordinate of the translation.
+
+        :return: The y-coordinate.
+        """
+        return self._array[QuaternionSE3Index.Y]
+
+    @property
+    def z(self) -> float:
+        """Returns the z-coordinate of the translation.
+
+        :return: The z-coordinate.
+        """
+        return self._array[QuaternionSE3Index.Z]
+
+    @property
+    def qw(self) -> float:
+        """Returns the scalar (w) component of the quaternion.
+
+        :return: The qw component.
+        """
+        return self._array[QuaternionSE3Index.QW]
+
+    @property
+    def qx(self) -> float:
+        """Returns the x component of the quaternion.
+
+        :return: The qx component.
+        """
+        return self._array[QuaternionSE3Index.QX]
+
+    @property
+    def qy(self) -> float:
+        """Returns the y component of the quaternion.
+
+        :return: The qy component.
+        """
+        return self._array[QuaternionSE3Index.QY]
+
+    @property
+    def qz(self) -> float:
+        """Returns the z component of the quaternion.
+
+        :return: The qz component.
+        """
+        return self._array[QuaternionSE3Index.QZ]
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ """Converts the QuaternionSE3 instance to a numpy array.
+
+ :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+ """
+ return self._array
+
+ @property
+ def state_se2(self) -> StateSE2:
+ """Returns the quaternion state as a 2D state by ignoring the z-axis.
+
+ :return: A StateSE2 instance representing the 2D projection of the 3D state.
+ """
+ # Convert quaternion to yaw angle
+ yaw = self.quaternion.euler_angles.yaw
+ return StateSE2(self.x, self.y, yaw)
+
+ @property
+ def point_3d(self) -> Point3D:
+ """Returns the 3D point representation of the state.
+
+ :return: A Point3D instance representing the 3D point.
+ """
+ return Point3D(self.x, self.y, self.z)
+
+ @property
+ def point_2d(self) -> Point2D:
+ """Returns the 2D point representation of the state.
+
+ :return: A Point2D instance representing the 2D point.
+ """
+ return Point2D(self.x, self.y)
+
+ @property
+ def shapely_point(self) -> geom.Point:
+ """Returns the Shapely point representation of the state.
+
+ :return: A Shapely Point instance representing the 3D point.
+ """
+ return self.point_3d.shapely_point
+
+ @cached_property
+ def quaternion(self) -> Quaternion:
+ """Returns the quaternion (w, x, y, z) representation of the state's orientation.
+
+ :return: A Quaternion instance representing the quaternion.
+ """
+ return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION])
diff --git a/d123/geometry/test/__init__.py b/d123/geometry/test/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py
new file mode 100644
index 00000000..d3141bbb
--- /dev/null
+++ b/d123/geometry/test/test_point.py
@@ -0,0 +1,203 @@
+import unittest
+from unittest.mock import MagicMock, patch
+
+import numpy as np
+
+from d123.geometry import Point2D, Point2DIndex
+from d123.geometry.geometry_index import Point3DIndex
+from d123.geometry.point import Point3D
+
+
+class TestPoint2D(unittest.TestCase):
+ """Unit tests for Point2D class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.x_coord = 3.5
+ self.y_coord = 4.2
+ self.point = Point2D(x=self.x_coord, y=self.y_coord)
+ self.test_array = np.zeros([2], dtype=np.float64)
+ self.test_array[Point2DIndex.X] = self.x_coord
+ self.test_array[Point2DIndex.Y] = self.y_coord
+
+ def test_init(self):
+ """Test Point2D initialization."""
+ point = Point2D(1.0, 2.0)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 2.0)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+        # Construct from an array indexed by Point2DIndex.
+ point = Point2D.from_array(self.test_array)
+ self.assertEqual(point.x, self.x_coord)
+ self.assertEqual(point.y, self.y_coord)
+
+ def test_from_array_invalid_dimensions(self):
+ """Test from_array with invalid array dimensions."""
+ # 2D array should raise assertion error
+ array_2d = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point2D.from_array(array_2d)
+
+ # 3D array should raise assertion error
+ array_3d = np.array([[[1.0]]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point2D.from_array(array_3d)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+
+ array_wrong_length = np.array([1.0, 2.0, 3.0], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point2D.from_array(array_wrong_length)
+
+ # Empty array
+ empty_array = np.array([], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point2D.from_array(empty_array)
+
+ def test_array_property(self):
+ """Test the array property."""
+ expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float64)
+ np.testing.assert_array_equal(self.point.array, expected_array)
+ self.assertEqual(self.point.array.dtype, np.float64)
+ self.assertEqual(self.point.array.shape, (2,))
+
+ def test_array_like(self):
+ """Test the __array__ behavior."""
+ expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float32)
+ output_array = np.array(self.point, dtype=np.float32)
+ np.testing.assert_array_equal(output_array, expected_array)
+ self.assertEqual(output_array.dtype, np.float32)
+ self.assertEqual(output_array.shape, (2,))
+
+ def test_shapely_point_property(self):
+ """Test the shapely_point property."""
+ with patch("shapely.geometry.Point") as mock_point:
+ mock_point_instance = MagicMock()
+ mock_point.return_value = mock_point_instance
+
+ result = self.point.shapely_point
+
+ mock_point.assert_called_once_with(self.x_coord, self.y_coord)
+ self.assertEqual(result, mock_point_instance)
+
+ def test_iter(self):
+ """Test the __iter__ method."""
+ coords = list(self.point)
+ self.assertEqual(coords, [self.x_coord, self.y_coord])
+
+ # Test that it's actually iterable
+ x, y = self.point
+ self.assertEqual(x, self.x_coord)
+ self.assertEqual(y, self.y_coord)
+
+ def test_hash(self):
+ """Test the __hash__ method."""
+ point_dict = {self.point: "test"}
+ self.assertIn(self.point, point_dict)
+ self.assertEqual(point_dict[self.point], "test")
+
+
+class TestPoint3D(unittest.TestCase):
+ """Unit tests for Point3D class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.x_coord = 3.5
+ self.y_coord = 4.2
+ self.z_coord = 5.1
+ self.point = Point3D(self.x_coord, self.y_coord, self.z_coord)
+ self.test_array = np.zeros((3,), dtype=np.float64)
+ self.test_array[Point3DIndex.X] = self.x_coord
+ self.test_array[Point3DIndex.Y] = self.y_coord
+ self.test_array[Point3DIndex.Z] = self.z_coord
+
+ def test_init(self):
+ """Test Point3D initialization."""
+ point = Point3D(1.0, 2.0, 3.0)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 2.0)
+ self.assertEqual(point.z, 3.0)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+        # Construct from an array indexed by Point3DIndex.
+ point = Point3D.from_array(self.test_array)
+ self.assertEqual(point.x, self.x_coord)
+ self.assertEqual(point.y, self.y_coord)
+ self.assertEqual(point.z, self.z_coord)
+
+ def test_from_array_invalid_dimensions(self):
+ """Test from_array with invalid array dimensions."""
+ # 2D array should raise assertion error
+ array_2d = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point3D.from_array(array_2d)
+
+ # 3D array should raise assertion error
+ array_3d = np.array([[[1.0]]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point3D.from_array(array_3d)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+
+ array_wrong_length = np.array([1.0, 2.0], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point3D.from_array(array_wrong_length)
+
+ # Empty array
+ empty_array = np.array([], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Point3D.from_array(empty_array)
+
+ def test_array_property(self):
+ """Test the array property."""
+ expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float64)
+ np.testing.assert_array_equal(self.point.array, expected_array)
+ self.assertEqual(self.point.array.dtype, np.float64)
+ self.assertEqual(self.point.array.shape, (3,))
+
+ def test_array_like(self):
+ """Test the __array__ behavior."""
+ expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float32)
+ output_array = np.array(self.point, dtype=np.float32)
+ np.testing.assert_array_equal(output_array, expected_array)
+ self.assertEqual(output_array.dtype, np.float32)
+ self.assertEqual(output_array.shape, (3,))
+
+ def test_shapely_point_property(self):
+ """Test the shapely_point property."""
+ with patch("shapely.geometry.Point") as mock_point:
+ mock_point_instance = MagicMock()
+ mock_point.return_value = mock_point_instance
+
+ result = self.point.shapely_point
+
+ mock_point.assert_called_once_with(self.x_coord, self.y_coord, self.z_coord)
+ self.assertEqual(result, mock_point_instance)
+
+ def test_iter(self):
+ """Test the __iter__ method."""
+ coords = list(self.point)
+ self.assertEqual(coords, [self.x_coord, self.y_coord, self.z_coord])
+
+ # Test that it's actually iterable
+ x, y, z = self.point
+ self.assertEqual(x, self.x_coord)
+ self.assertEqual(y, self.y_coord)
+ self.assertEqual(z, self.z_coord)
+
+ def test_hash(self):
+ """Test the __hash__ method."""
+ point_dict = {self.point: "test"}
+ self.assertIn(self.point, point_dict)
+ self.assertEqual(point_dict[self.point], "test")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/d123/geometry/test/test_polyline.py b/d123/geometry/test/test_polyline.py
new file mode 100644
index 00000000..e4103364
--- /dev/null
+++ b/d123/geometry/test/test_polyline.py
@@ -0,0 +1,326 @@
+import unittest
+
+import numpy as np
+import shapely.geometry as geom
+
+from d123.geometry.point import Point2D, Point3D
+from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.se import StateSE2
+
+
+class TestPolyline2D(unittest.TestCase):
+ """Test class for Polyline2D."""
+
+ def test_from_linestring(self):
+ """Test creating Polyline2D from LineString."""
+ coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ self.assertIsInstance(polyline, Polyline2D)
+ self.assertTrue(polyline.linestring.equals(linestring))
+
+ def test_from_linestring_with_z(self):
+ """Test creating Polyline2D from LineString with Z coordinates."""
+ coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ self.assertIsInstance(polyline, Polyline2D)
+ self.assertFalse(polyline.linestring.has_z)
+
+ def test_from_array_2d(self):
+ """Test creating Polyline2D from 2D array."""
+ array = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.0]], dtype=np.float32)
+ polyline = Polyline2D.from_array(array)
+ self.assertIsInstance(polyline, Polyline2D)
+ np.testing.assert_array_almost_equal(polyline.array, array)
+
+ def test_from_array_3d(self):
+ """Test creating Polyline2D from 3D array."""
+ array = np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 2.0], [2.0, 0.0, 3.0]], dtype=np.float32)
+ polyline = Polyline2D.from_array(array)
+ self.assertIsInstance(polyline, Polyline2D)
+ expected = array[:, :2]
+ np.testing.assert_array_almost_equal(polyline.array, expected)
+
+ def test_from_array_invalid_shape(self):
+ """Test creating Polyline2D from invalid array shape."""
+ array = np.array([[0.0], [1.0], [2.0]], dtype=np.float32)
+ with self.assertRaises(ValueError):
+ Polyline2D.from_array(array)
+
+ def test_array_property(self):
+ """Test array property."""
+ coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ array = polyline.array
+ self.assertEqual(array.shape, (3, 2))
+ self.assertEqual(array.dtype, np.float64)
+ np.testing.assert_array_almost_equal(array, coords)
+
+ def test_length_property(self):
+ """Test length property."""
+ coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ self.assertEqual(polyline.length, 2.0)
+
+ def test_interpolate_single_distance(self):
+ """Test interpolation with single distance."""
+ coords = [(0.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ point = polyline.interpolate(1.0)
+ self.assertIsInstance(point, Point2D)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 0.0)
+
+ def test_interpolate_multiple_distances(self):
+ """Test interpolation with multiple distances."""
+ coords = [(0.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ points = polyline.interpolate(np.array([0.0, 1.0, 2.0]))
+ self.assertIsInstance(points, np.ndarray)
+ self.assertEqual(points.shape, (3, 2))
+ expected = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])
+ np.testing.assert_array_almost_equal(points, expected)
+
+ def test_interpolate_normalized(self):
+ """Test normalized interpolation."""
+ coords = [(0.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ point = polyline.interpolate(0.5, normalized=True)
+ self.assertIsInstance(point, Point2D)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 0.0)
+
+ def test_project_point2d(self):
+ """Test projecting Point2D onto polyline."""
+ coords = [(0.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ point = Point2D(1.0, 1.0)
+ distance = polyline.project(point)
+ self.assertEqual(distance, 1.0)
+
+ def test_project_statese2(self):
+ """Test projecting StateSE2 onto polyline."""
+ coords = [(0.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ state = StateSE2(1.0, 1.0, 0.0)
+ distance = polyline.project(state)
+ self.assertEqual(distance, 1.0)
+
+ def test_polyline_se2_property(self):
+ """Test polyline_se2 property."""
+ coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline2D.from_linestring(linestring)
+ polyline_se2 = polyline.polyline_se2
+ self.assertIsInstance(polyline_se2, PolylineSE2)
+
+
+class TestPolylineSE2(unittest.TestCase):
+ """Test class for PolylineSE2."""
+
+ def test_from_linestring(self):
+ """Test creating PolylineSE2 from LineString."""
+ coords = [(0.0, 0.0), (1.0, 0.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = PolylineSE2.from_linestring(linestring)
+ self.assertIsInstance(polyline, PolylineSE2)
+ self.assertEqual(polyline.se2_array.shape, (3, 3))
+
+ def test_from_array_2d(self):
+ """Test creating PolylineSE2 from 2D array."""
+ array = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], dtype=np.float32)
+ polyline = PolylineSE2.from_array(array)
+ self.assertIsInstance(polyline, PolylineSE2)
+ self.assertEqual(polyline.se2_array.shape, (3, 3))
+
+ def test_from_array_se2(self):
+ """Test creating PolylineSE2 from SE2 array."""
+ array = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float32)
+ polyline = PolylineSE2.from_array(array)
+ self.assertIsInstance(polyline, PolylineSE2)
+ np.testing.assert_array_almost_equal(polyline.se2_array, array)
+
+ def test_from_array_invalid_shape(self):
+ """Test creating PolylineSE2 from invalid array shape."""
+ array = np.array([[0.0], [1.0], [2.0]], dtype=np.float32)
+ with self.assertRaises(ValueError):
+ PolylineSE2.from_array(array)
+
+ def test_from_discrete_se2(self):
+ """Test creating PolylineSE2 from discrete SE2 states."""
+ states = [StateSE2(0.0, 0.0, 0.0), StateSE2(1.0, 0.0, 0.0), StateSE2(2.0, 0.0, 0.0)]
+ polyline = PolylineSE2.from_discrete_se2(states)
+ self.assertIsInstance(polyline, PolylineSE2)
+ self.assertEqual(polyline.se2_array.shape, (3, 3))
+
+ def test_length_property(self):
+ """Test length property."""
+ array = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ self.assertEqual(polyline.length, 2.0)
+
+ def test_interpolate_single_distance(self):
+ """Test interpolation with single distance."""
+ array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ state = polyline.interpolate(1.0)
+ self.assertIsInstance(state, StateSE2)
+ self.assertEqual(state.x, 1.0)
+ self.assertEqual(state.y, 0.0)
+
+ def test_interpolate_multiple_distances(self):
+ """Test interpolation with multiple distances."""
+ array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ states = polyline.interpolate(np.array([0.0, 1.0, 2.0]))
+ self.assertIsInstance(states, np.ndarray)
+ self.assertEqual(states.shape, (3, 3))
+
+ def test_interpolate_normalized(self):
+ """Test normalized interpolation."""
+ array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ state = polyline.interpolate(0.5, normalized=True)
+ self.assertIsInstance(state, StateSE2)
+ self.assertEqual(state.x, 1.0)
+ self.assertEqual(state.y, 0.0)
+
+ def test_project_point2d(self):
+ """Test projecting Point2D onto SE2 polyline."""
+ array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ point = Point2D(1.0, 1.0)
+ distance = polyline.project(point)
+ self.assertEqual(distance, 1.0)
+
+ def test_project_statese2(self):
+ """Test projecting StateSE2 onto SE2 polyline."""
+ array = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float64)
+ polyline = PolylineSE2.from_array(array)
+ state = StateSE2(1.0, 1.0, 0.0)
+ distance = polyline.project(state)
+ self.assertEqual(distance, 1.0)
+
+
+class TestPolyline3D(unittest.TestCase):
+ """Test class for Polyline3D."""
+
+ def test_from_linestring_with_z(self):
+ """Test creating Polyline3D from LineString with Z coordinates."""
+ coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ self.assertIsInstance(polyline, Polyline3D)
+ self.assertTrue(polyline.linestring.has_z)
+
+ def test_from_linestring_without_z(self):
+ """Test creating Polyline3D from LineString without Z coordinates."""
+ coords = [(0.0, 0.0), (1.0, 1.0), (2.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ self.assertIsInstance(polyline, Polyline3D)
+ self.assertTrue(polyline.linestring.has_z)
+
+ def test_from_array(self):
+ """Test creating Polyline3D from 3D array."""
+ array = np.array([[0.0, 0.0, 1.0], [1.0, 1.0, 2.0], [2.0, 0.0, 3.0]], dtype=np.float64)
+ polyline = Polyline3D.from_array(array)
+ self.assertIsInstance(polyline, Polyline3D)
+ np.testing.assert_array_almost_equal(polyline.array, array)
+
+ def test_from_array_invalid_shape(self):
+ """Test creating Polyline3D from invalid array shape."""
+ array = np.array([[0.0, 0.0], [1.0, 1.0]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Polyline3D.from_array(array)
+
+ def test_array_property(self):
+ """Test array property."""
+ coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ array = polyline.array
+ self.assertEqual(array.shape, (3, 3))
+ self.assertEqual(array.dtype, np.float64)
+ np.testing.assert_array_almost_equal(array, coords)
+
+ def test_polyline_2d_property(self):
+ """Test polyline_2d property."""
+ coords = [(0.0, 0.0, 1.0), (1.0, 1.0, 2.0), (2.0, 0.0, 3.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ polyline_2d = polyline.polyline_2d
+ self.assertIsInstance(polyline_2d, Polyline2D)
+ self.assertFalse(polyline_2d.linestring.has_z)
+
+ def test_polyline_se2_property(self):
+ """Test polyline_se2 property."""
+ coords = [(0.0, 0.0, 1.0), (1.0, 0.0, 2.0), (2.0, 0.0, 3.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ polyline_se2 = polyline.polyline_se2
+ self.assertIsInstance(polyline_se2, PolylineSE2)
+
+ def test_length_property(self):
+ """Test length property."""
+ coords = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ self.assertEqual(polyline.length, 2.0)
+
+ def test_interpolate_single_distance(self):
+ """Test interpolation with single distance."""
+ coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ point = polyline.interpolate(1.0)
+ self.assertIsInstance(point, Point3D)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 0.0)
+ self.assertEqual(point.z, 1.0)
+
+ def test_interpolate_multiple_distances(self):
+ """Test interpolation with multiple distances."""
+ coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ points = polyline.interpolate(np.array([0.0, 1.0, 2.0]))
+ self.assertIsInstance(points, np.ndarray)
+ self.assertEqual(points.shape, (3, 3))
+
+ def test_interpolate_normalized(self):
+ """Test normalized interpolation."""
+ coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 2.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ point = polyline.interpolate(0.5, normalized=True)
+ self.assertIsInstance(point, Point3D)
+ self.assertEqual(point.x, 1.0)
+ self.assertEqual(point.y, 0.0)
+ self.assertEqual(point.z, 1.0)
+
+ def test_project_point2d(self):
+ """Test projecting Point2D onto 3D polyline."""
+ coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ point = Point2D(1.0, 1.0)
+ distance = polyline.project(point)
+ self.assertEqual(distance, 1.0)
+
+ def test_project_point3d(self):
+ """Test projecting Point3D onto 3D polyline."""
+ coords = [(0.0, 0.0, 0.0), (2.0, 0.0, 0.0)]
+ linestring = geom.LineString(coords)
+ polyline = Polyline3D.from_linestring(linestring)
+ point = Point3D(1.0, 1.0, 1.0)
+ distance = polyline.project(point)
+ self.assertEqual(distance, 1.0)
diff --git a/d123/geometry/test/test_rotation.py b/d123/geometry/test/test_rotation.py
new file mode 100644
index 00000000..65f902d9
--- /dev/null
+++ b/d123/geometry/test/test_rotation.py
@@ -0,0 +1,240 @@
+import unittest
+
+import numpy as np
+
+from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
+from d123.geometry.rotation import EulerAngles, Quaternion
+
+
+class TestEulerAngles(unittest.TestCase):
+ """Unit tests for EulerAngles class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.roll = 0.1
+ self.pitch = 0.2
+ self.yaw = 0.3
+ self.euler_angles = EulerAngles(self.roll, self.pitch, self.yaw)
+ self.test_array = np.zeros([3], dtype=np.float64)
+ self.test_array[EulerAnglesIndex.ROLL] = self.roll
+ self.test_array[EulerAnglesIndex.PITCH] = self.pitch
+ self.test_array[EulerAnglesIndex.YAW] = self.yaw
+
+ def test_init(self):
+ """Test EulerAngles initialization."""
+ euler = EulerAngles(roll=0.1, pitch=0.2, yaw=0.3)
+ self.assertEqual(euler.roll, 0.1)
+ self.assertEqual(euler.pitch, 0.2)
+ self.assertEqual(euler.yaw, 0.3)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+ euler = EulerAngles.from_array(self.test_array)
+ self.assertIsInstance(euler, EulerAngles)
+ self.assertAlmostEqual(euler.roll, self.roll)
+ self.assertAlmostEqual(euler.pitch, self.pitch)
+ self.assertAlmostEqual(euler.yaw, self.yaw)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+ with self.assertRaises(AssertionError):
+ EulerAngles.from_array(np.array([1, 2]))
+ with self.assertRaises(AssertionError):
+ EulerAngles.from_array(np.array([[1, 2, 3]]))
+
+ def test_from_array_copy(self):
+ """Test from_array with copy parameter."""
+ original_array = self.test_array.copy()
+ euler_copy = EulerAngles.from_array(original_array, copy=True)
+ euler_no_copy = EulerAngles.from_array(original_array, copy=False)
+
+ original_array[0] = 999.0
+ self.assertNotEqual(euler_copy.roll, 999.0)
+ self.assertEqual(euler_no_copy.roll, 999.0)
+
+ def test_from_rotation_matrix(self):
+ """Test from_rotation_matrix class method."""
+ identity_matrix = np.eye(3)
+ euler = EulerAngles.from_rotation_matrix(identity_matrix)
+ self.assertAlmostEqual(euler.roll, 0.0, places=10)
+ self.assertAlmostEqual(euler.pitch, 0.0, places=10)
+ self.assertAlmostEqual(euler.yaw, 0.0, places=10)
+
+ def test_from_rotation_matrix_invalid(self):
+ """Test from_rotation_matrix with invalid input."""
+ with self.assertRaises(AssertionError):
+ EulerAngles.from_rotation_matrix(np.array([[1, 2]]))
+ with self.assertRaises(AssertionError):
+ EulerAngles.from_rotation_matrix(np.array([1, 2, 3]))
+
+ def test_array_property(self):
+ """Test array property."""
+ array = self.euler_angles.array
+ self.assertEqual(array.shape, (3,))
+ self.assertEqual(array[EulerAnglesIndex.ROLL], self.roll)
+ self.assertEqual(array[EulerAnglesIndex.PITCH], self.pitch)
+ self.assertEqual(array[EulerAnglesIndex.YAW], self.yaw)
+
+ def test_iterator(self):
+ """Test iterator functionality."""
+ values = list(self.euler_angles)
+ self.assertEqual(values, [self.roll, self.pitch, self.yaw])
+
+ def test_hash(self):
+ """Test hash functionality."""
+ euler1 = EulerAngles(0.1, 0.2, 0.3)
+ euler2 = EulerAngles(0.1, 0.2, 0.3)
+ euler3 = EulerAngles(0.1, 0.2, 0.4)
+
+ self.assertEqual(hash(euler1), hash(euler2))
+ self.assertNotEqual(hash(euler1), hash(euler3))
+
+
+class TestQuaternion(unittest.TestCase):
+ """Unit tests for Quaternion class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.qw = 1.0
+ self.qx = 0.0
+ self.qy = 0.0
+ self.qz = 0.0
+ self.quaternion = Quaternion(self.qw, self.qx, self.qy, self.qz)
+ self.test_array = np.zeros([4], dtype=np.float64)
+ self.test_array[QuaternionIndex.QW] = self.qw
+ self.test_array[QuaternionIndex.QX] = self.qx
+ self.test_array[QuaternionIndex.QY] = self.qy
+ self.test_array[QuaternionIndex.QZ] = self.qz
+
+ def test_init(self):
+ """Test Quaternion initialization."""
+ quat = Quaternion(1.0, 0.0, 0.0, 0.0)
+ self.assertEqual(quat.qw, 1.0)
+ self.assertEqual(quat.qx, 0.0)
+ self.assertEqual(quat.qy, 0.0)
+ self.assertEqual(quat.qz, 0.0)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+ quat = Quaternion.from_array(self.test_array)
+ self.assertAlmostEqual(quat.qw, self.qw)
+ self.assertAlmostEqual(quat.qx, self.qx)
+ self.assertAlmostEqual(quat.qy, self.qy)
+ self.assertAlmostEqual(quat.qz, self.qz)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+ with self.assertRaises(AssertionError):
+ Quaternion.from_array(np.array([1, 2, 3]))
+ with self.assertRaises(AssertionError):
+ Quaternion.from_array(np.array([[1, 2, 3, 4]]))
+
+ def test_from_array_copy(self):
+ """Test from_array with copy parameter."""
+ original_array = self.test_array.copy()
+ quat_copy = Quaternion.from_array(original_array, copy=True)
+ quat_no_copy = Quaternion.from_array(original_array, copy=False)
+
+ original_array[0] = 999.0
+ self.assertNotEqual(quat_copy.qw, 999.0)
+ self.assertEqual(quat_no_copy.qw, 999.0)
+
+ def test_from_rotation_matrix(self):
+ """Test from_rotation_matrix class method."""
+ identity_matrix = np.eye(3)
+ quat = Quaternion.from_rotation_matrix(identity_matrix)
+ self.assertAlmostEqual(quat.qw, 1.0, places=10)
+ self.assertAlmostEqual(quat.qx, 0.0, places=10)
+ self.assertAlmostEqual(quat.qy, 0.0, places=10)
+ self.assertAlmostEqual(quat.qz, 0.0, places=10)
+
+ def test_from_rotation_matrix_invalid(self):
+ """Test from_rotation_matrix with invalid input."""
+ with self.assertRaises(AssertionError):
+ Quaternion.from_rotation_matrix(np.array([[1, 2]]))
+ with self.assertRaises(AssertionError):
+ Quaternion.from_rotation_matrix(np.array([1, 2, 3]))
+
+ def test_from_euler_angles(self):
+ """Test from_euler_angles class method."""
+ euler = EulerAngles(0.0, 0.0, 0.0)
+ quat = Quaternion.from_euler_angles(euler)
+ self.assertAlmostEqual(quat.qw, 1.0, places=10)
+ self.assertAlmostEqual(quat.qx, 0.0, places=10)
+ self.assertAlmostEqual(quat.qy, 0.0, places=10)
+ self.assertAlmostEqual(quat.qz, 0.0, places=10)
+
+ def test_array_property(self):
+ """Test array property."""
+ array = self.quaternion.array
+ self.assertEqual(array.shape, (4,))
+ np.testing.assert_array_equal(array, self.test_array)
+
+ def test_pyquaternion_property(self):
+ """Test pyquaternion property."""
+ pyquat = self.quaternion.pyquaternion
+ self.assertEqual(pyquat.w, self.qw)
+ self.assertEqual(pyquat.x, self.qx)
+ self.assertEqual(pyquat.y, self.qy)
+ self.assertEqual(pyquat.z, self.qz)
+
+ def test_euler_angles_property(self):
+ """Test euler_angles property."""
+ euler = self.quaternion.euler_angles
+ self.assertIsInstance(euler, EulerAngles)
+ self.assertAlmostEqual(euler.roll, 0.0, places=10)
+ self.assertAlmostEqual(euler.pitch, 0.0, places=10)
+ self.assertAlmostEqual(euler.yaw, 0.0, places=10)
+
+ def test_rotation_matrix_property(self):
+ """Test rotation_matrix property."""
+ rot_matrix = self.quaternion.rotation_matrix
+ self.assertEqual(rot_matrix.shape, (3, 3))
+ np.testing.assert_array_almost_equal(rot_matrix, np.eye(3))
+
+ def test_iterator(self):
+ """Test iterator functionality."""
+ values = list(self.quaternion)
+ self.assertEqual(values, [self.qw, self.qx, self.qy, self.qz])
+
+ def test_hash(self):
+ """Test hash functionality."""
+ quat1 = Quaternion(1.0, 0.0, 0.0, 0.0)
+ quat2 = Quaternion(1.0, 0.0, 0.0, 0.0)
+ quat3 = Quaternion(0.0, 1.0, 0.0, 0.0)
+
+ self.assertEqual(hash(quat1), hash(quat2))
+ self.assertNotEqual(hash(quat1), hash(quat3))
+
+
+class TestRotationConversions(unittest.TestCase):
+ """Test conversions between EulerAngles and Quaternion."""
+
+ def test_euler_to_quaternion_to_euler(self):
+ """Test round-trip conversion from Euler to Quaternion and back."""
+ original_euler = EulerAngles(0.1, 0.2, 0.3)
+ quaternion = Quaternion.from_euler_angles(original_euler)
+ converted_euler = quaternion.euler_angles
+
+ self.assertAlmostEqual(original_euler.roll, converted_euler.roll, places=10)
+ self.assertAlmostEqual(original_euler.pitch, converted_euler.pitch, places=10)
+ self.assertAlmostEqual(original_euler.yaw, converted_euler.yaw, places=10)
+
+ def test_rotation_matrix_consistency(self):
+ """Test that rotation matrix conversions are consistent."""
+ euler = EulerAngles(0.1, 0.2, 0.3)
+ quat = Quaternion.from_euler_angles(euler)
+
+ euler_from_matrix = EulerAngles.from_rotation_matrix(euler.rotation_matrix)
+ quat_from_matrix = Quaternion.from_rotation_matrix(quat.rotation_matrix)
+ self.assertAlmostEqual(euler.roll, euler_from_matrix.roll, places=10)
+ self.assertAlmostEqual(euler.pitch, euler_from_matrix.pitch, places=10)
+ self.assertAlmostEqual(euler.yaw, euler_from_matrix.yaw, places=10)
+ self.assertAlmostEqual(quat.qw, quat_from_matrix.qw, places=10)
+ self.assertAlmostEqual(quat.qx, quat_from_matrix.qx, places=10)
+ self.assertAlmostEqual(quat.qy, quat_from_matrix.qy, places=10)
+ self.assertAlmostEqual(quat.qz, quat_from_matrix.qz, places=10)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/d123/geometry/test/test_vector.py b/d123/geometry/test/test_vector.py
new file mode 100644
index 00000000..526a2104
--- /dev/null
+++ b/d123/geometry/test/test_vector.py
@@ -0,0 +1,172 @@
+import unittest
+
+import numpy as np
+
+from d123.geometry import Vector2D, Vector2DIndex, Vector3D, Vector3DIndex
+
+
+class TestVector2D(unittest.TestCase):
+ """Unit tests for Vector2D class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.x_coord = 3.5
+ self.y_coord = 4.2
+ self.vector = Vector2D(x=self.x_coord, y=self.y_coord)
+ self.test_array = np.zeros([2], dtype=np.float64)
+ self.test_array[Vector2DIndex.X] = self.x_coord
+ self.test_array[Vector2DIndex.Y] = self.y_coord
+
+ def test_init(self):
+ """Test Vector2D initialization."""
+ vector = Vector2D(1.0, 2.0)
+ self.assertEqual(vector.x, 1.0)
+ self.assertEqual(vector.y, 2.0)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+ vector = Vector2D.from_array(self.test_array)
+ self.assertEqual(vector.x, self.x_coord)
+ self.assertEqual(vector.y, self.y_coord)
+
+ def test_from_array_invalid_dimensions(self):
+ """Test from_array with invalid array dimensions."""
+ # 2D array should raise assertion error
+ array_2d = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector2D.from_array(array_2d)
+
+ # 3D array should raise assertion error
+ array_3d = np.array([[[1.0]]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector2D.from_array(array_3d)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+ array_wrong_length = np.array([1.0, 2.0, 3.0], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector2D.from_array(array_wrong_length)
+
+ # Empty array
+ empty_array = np.array([], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector2D.from_array(empty_array)
+
+ def test_array_property(self):
+ """Test the array property."""
+ expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float64)
+ np.testing.assert_array_equal(self.vector.array, expected_array)
+ self.assertEqual(self.vector.array.dtype, np.float64)
+ self.assertEqual(self.vector.array.shape, (2,))
+
+ def test_array_like(self):
+ """Test the __array__ behavior."""
+ expected_array = np.array([self.x_coord, self.y_coord], dtype=np.float32)
+ output_array = np.array(self.vector, dtype=np.float32)
+ np.testing.assert_array_equal(output_array, expected_array)
+ self.assertEqual(output_array.dtype, np.float32)
+ self.assertEqual(output_array.shape, (2,))
+
+ def test_iter(self):
+ """Test the __iter__ method."""
+ coords = list(self.vector)
+ self.assertEqual(coords, [self.x_coord, self.y_coord])
+
+ # Test that it's actually iterable
+ x, y = self.vector
+ self.assertEqual(x, self.x_coord)
+ self.assertEqual(y, self.y_coord)
+
+ def test_hash(self):
+ """Test the __hash__ method."""
+ vector_dict = {self.vector: "test"}
+ self.assertIn(self.vector, vector_dict)
+ self.assertEqual(vector_dict[self.vector], "test")
+
+
+class TestVector3D(unittest.TestCase):
+ """Unit tests for Vector3D class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.x_coord = 3.5
+ self.y_coord = 4.2
+ self.z_coord = 5.1
+ self.vector = Vector3D(self.x_coord, self.y_coord, self.z_coord)
+ self.test_array = np.zeros((3,), dtype=np.float64)
+ self.test_array[Vector3DIndex.X] = self.x_coord
+ self.test_array[Vector3DIndex.Y] = self.y_coord
+ self.test_array[Vector3DIndex.Z] = self.z_coord
+
+ def test_init(self):
+ """Test Vector3D initialization."""
+ vector = Vector3D(1.0, 2.0, 3.0)
+ self.assertEqual(vector.x, 1.0)
+ self.assertEqual(vector.y, 2.0)
+ self.assertEqual(vector.z, 3.0)
+
+ def test_from_array_valid(self):
+ """Test from_array class method with valid input."""
+ vector = Vector3D.from_array(self.test_array)
+ self.assertEqual(vector.x, self.x_coord)
+ self.assertEqual(vector.y, self.y_coord)
+ self.assertEqual(vector.z, self.z_coord)
+
+ def test_from_array_invalid_dimensions(self):
+ """Test from_array with invalid array dimensions."""
+ # 2D array should raise assertion error
+ array_2d = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector3D.from_array(array_2d)
+
+ # 3D array should raise assertion error
+ array_3d = np.array([[[1.0]]], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector3D.from_array(array_3d)
+
+ def test_from_array_invalid_shape(self):
+ """Test from_array with invalid array shape."""
+ array_wrong_length = np.array([1.0, 2.0], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector3D.from_array(array_wrong_length)
+
+ # Empty array
+ empty_array = np.array([], dtype=np.float64)
+ with self.assertRaises(AssertionError):
+ Vector3D.from_array(empty_array)
+
+ def test_array_property(self):
+ """Test the array property."""
+ expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float64)
+ np.testing.assert_array_equal(self.vector.array, expected_array)
+ self.assertEqual(self.vector.array.dtype, np.float64)
+ self.assertEqual(self.vector.array.shape, (3,))
+
+ def test_array_like(self):
+ """Test the __array__ behavior."""
+ expected_array = np.array([self.x_coord, self.y_coord, self.z_coord], dtype=np.float32)
+ output_array = np.array(self.vector, dtype=np.float32)
+ np.testing.assert_array_equal(output_array, expected_array)
+ self.assertEqual(output_array.dtype, np.float32)
+ self.assertEqual(output_array.shape, (3,))
+
+ def test_iter(self):
+ """Test the __iter__ method."""
+ coords = list(self.vector)
+ self.assertEqual(coords, [self.x_coord, self.y_coord, self.z_coord])
+
+ # Test that it's actually iterable
+ x, y, z = self.vector
+ self.assertEqual(x, self.x_coord)
+ self.assertEqual(y, self.y_coord)
+ self.assertEqual(z, self.z_coord)
+
+ def test_hash(self):
+ """Test the __hash__ method."""
+ vector_dict = {self.vector: "test"}
+ self.assertIn(self.vector, vector_dict)
+ self.assertEqual(vector_dict[self.vector], "test")
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index a9c40077..ff8bb237 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -29,14 +29,30 @@ def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[
half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0
half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0
- corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width)
+ corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(
+ centers,
+ yaws,
+ half_length,
+ half_width,
+ )
corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array(
- centers, yaws, half_length, -half_width
+ centers,
+ yaws,
+ half_length,
+ -half_width,
)
corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array(
- centers, yaws, -half_length, -half_width
+ centers,
+ yaws,
+ -half_length,
+ -half_width,
+ )
+ corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(
+ centers,
+ yaws,
+ -half_length,
+ half_width,
)
- corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width)
return corners_array.squeeze(axis=0) if ndim_one else corners_array
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index 4fa2e458..de12fc0f 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -1,14 +1,57 @@
+from typing import Union
+
import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.geometry_index import EulerAnglesIndex
# TODO: move this somewhere else
# TODO: Maybe rename wrap angle?
# TODO: Add implementation for torch, jax, or whatever else is needed.
-def normalize_angle(angle):
+def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]:
"""
Maps an angle into the range [-π, π).
- :param angle: any angle as float
- :return: normalized angle
+ :param angle: any angle as float or array of floats
+ :return: normalized angle or array of normalized angles
"""
- return np.arctan2(np.sin(angle), np.cos(angle))
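+    # Shift by π, wrap into [0, 2π) with the modulo, then shift back to [-π, π).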
+ return ((angle + np.pi) % (2 * np.pi)) - np.pi
+
+
+def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
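+    """Computes a batch of 3x3 rotation matrices from an array of Euler angles.
+
+    :param euler_angles_array: Array of shape (N, 3), indexed by EulerAnglesIndex.
+    :return: Array of shape (N, 3, 3) containing one rotation matrix per row of the input.
+    """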
+ assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex)
+
+ # Extract roll, pitch, yaw for all samples at once
+ roll = euler_angles_array[:, EulerAnglesIndex.ROLL]
+ pitch = euler_angles_array[:, EulerAnglesIndex.PITCH]
+ yaw = euler_angles_array[:, EulerAnglesIndex.YAW]
+
+ # Compute sin/cos for all angles at once
+ cos_roll, sin_roll = np.cos(roll), np.sin(roll)
+ cos_pitch, sin_pitch = np.cos(pitch), np.sin(pitch)
+ cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
+
+ # Build rotation matrices for entire batch
+ batch_size = euler_angles_array.shape[0]
+ rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64)
+
+ # R_x @ R_y @ R_z components
+ rotation_matrices[:, 0, 0] = cos_pitch * cos_yaw
+ rotation_matrices[:, 0, 1] = -cos_pitch * sin_yaw
+ rotation_matrices[:, 0, 2] = sin_pitch
+
+ rotation_matrices[:, 1, 0] = sin_roll * sin_pitch * cos_yaw + cos_roll * sin_yaw
+ rotation_matrices[:, 1, 1] = -sin_roll * sin_pitch * sin_yaw + cos_roll * cos_yaw
+ rotation_matrices[:, 1, 2] = -sin_roll * cos_pitch
+
+ rotation_matrices[:, 2, 0] = -cos_roll * sin_pitch * cos_yaw + sin_roll * sin_yaw
+ rotation_matrices[:, 2, 1] = cos_roll * sin_pitch * sin_yaw + sin_roll * cos_yaw
+ rotation_matrices[:, 2, 2] = cos_roll * cos_pitch
+
+ return rotation_matrices
+
+
+def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
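+    """Computes a single 3x3 rotation matrix from an Euler-angle array.
+
+    :param euler_angles: Array of shape (3,), indexed by EulerAnglesIndex.
+    :return: A 3x3 numpy array representing the rotation matrix.
+    """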
+ assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex)
+ return get_rotation_matrices_from_euler_array(euler_angles[None, :])[0]
diff --git a/d123/geometry/vector.py b/d123/geometry/vector.py
index 3ab2ef51..77a3f67f 100644
--- a/d123/geometry/vector.py
+++ b/d123/geometry/vector.py
@@ -6,11 +6,11 @@
import numpy as np
import numpy.typing as npt
+from d123.common.utils.mixin import ArrayMixin
from d123.geometry.geometry_index import Vector2DIndex, Vector3DIndex
-@dataclass
-class Vector2D:
+class Vector2D(ArrayMixin):
"""
Class to represent 2D vectors, in x, y direction.
@@ -26,21 +26,45 @@ class Vector2D:
5.0
"""
- x: float # [m] x-component of the vector
- y: float # [m] y-component of the vector
- __slots__ = "x", "y"
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float):
+ """Initialize Vector2D with x, y components."""
+ array = np.zeros(len(Vector2DIndex), dtype=np.float64)
+ array[Vector2DIndex.X] = x
+ array[Vector2DIndex.Y] = y
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> Vector2D:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector2D:
"""Constructs a Vector2D from a numpy array.
:param array: Array of shape (2,) representing the vector components [x, y], indexed by \
:class:`~d123.geometry.Vector2DIndex`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A Vector2D instance.
"""
assert array.ndim == 1
assert array.shape[0] == len(Vector2DIndex)
- return Vector2D(array[Vector2DIndex.X], array[Vector2DIndex.Y])
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @property
+ def x(self) -> float:
+ """The x component of the vector.
+
+ :return: The x component of the vector.
+ """
+ return self._array[Vector2DIndex.X]
+
+ @property
+ def y(self) -> float:
+ """The y component of the vector.
+
+ :return: The y component of the vector.
+ """
+ return self._array[Vector2DIndex.Y]
@property
def array(self) -> npt.NDArray[np.float64]:
@@ -112,7 +136,7 @@ def __hash__(self) -> int:
-@dataclass
-class Vector3D:
+class Vector3D(ArrayMixin):
"""
Class to represent 3D vectors, in x, y, z direction.
@@ -128,21 +152,53 @@ class Vector3D:
3.7416573867739413
"""
- x: float # [m] x-component of the vector
- y: float # [m] y-component of the vector
- z: float # [m] z-component of the vector
- __slots__ = "x", "y", "z"
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, x: float, y: float, z: float):
+ """Initialize Vector3D with x, y, z components."""
+ array = np.zeros(len(Vector3DIndex), dtype=np.float64)
+ array[Vector3DIndex.X] = x
+ array[Vector3DIndex.Y] = y
+ array[Vector3DIndex.Z] = z
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> Vector3D:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Vector3D:
"""Constructs a Vector3D from a numpy array.
:param array: Array of shape (3,), indexed by :class:`~d123.geometry.geometry_index.Vector3DIndex`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A Vector3D instance.
"""
assert array.ndim == 1
assert array.shape[0] == len(Vector3DIndex)
- return Vector3D(array[Vector3DIndex.X], array[Vector3DIndex.Y], array[Vector3DIndex.Z])
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @property
+ def x(self) -> float:
+ """The x component of the vector.
+
+ :return: The x component of the vector.
+ """
+ return self._array[Vector3DIndex.X]
+
+ @property
+ def y(self) -> float:
+ """The y component of the vector.
+
+ :return: The y component of the vector.
+ """
+ return self._array[Vector3DIndex.Y]
+
+ @property
+ def z(self) -> float:
+ """The z component of the vector.
+
+ :return: The z component of the vector.
+ """
+ return self._array[Vector3DIndex.Z]
@property
def array(self) -> npt.NDArray[np.float64]:
@@ -152,11 +208,7 @@ def array(self) -> npt.NDArray[np.float64]:
:return: A numpy array representing the vector components [x, y, z], indexed by \
:class:`~d123.geometry.geometry_index.Vector3DIndex`.
"""
- array = np.zeros(len(Vector3DIndex), dtype=np.float64)
- array[Vector3DIndex.X] = self.x
- array[Vector3DIndex.Y] = self.y
- array[Vector3DIndex.Z] = self.z
- return array
+ return self._array
@property
def magnitude(self) -> float:
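
The copy flag added to from_array makes the view-versus-snapshot semantics explicit; a short illustration (assuming Vector3D is re-exported from d123.geometry, as in other imports in this series):

    import numpy as np

    from d123.geometry import Vector3D

    arr = np.array([1.0, 2.0, 3.0])
    view = Vector3D.from_array(arr, copy=False)  # shares memory with arr
    snap = Vector3D.from_array(arr)              # default copy=True takes a snapshot
    arr[0] = 99.0
    assert view.x == 99.0  # the view tracks the mutation
    assert snap.x == 1.0   # the snapshot does not
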
From 7e5d0da7300700ba16b00602db28a450ba98c90c Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 17:43:34 +0200
Subject: [PATCH 017/145] Make bounding box array-like. Add tests for bounding
boxes and Occupancy Map (#44)
---
.../waymo_map_utils/womp_boundary_utils.py | 4 +-
d123/geometry/bounding_box.py | 179 +++++++----
d123/geometry/occupancy_map.py | 2 +-
d123/geometry/test/test_bounding_box.py | 215 +++++++++++++
d123/geometry/test/test_occupancy_map.py | 285 ++++++++++++++++++
notebooks/viz/bev_matplotlib.ipynb | 2 +-
notebooks/viz/viser_testing_v2_scene.ipynb | 6 +-
7 files changed, 631 insertions(+), 62 deletions(-)
create mode 100644 d123/geometry/test/test_bounding_box.py
create mode 100644 d123/geometry/test/test_occupancy_map.py
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index 5831ea2a..269b09b6 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -67,7 +67,7 @@ def _collect_perpendicular_hits(
perp_end_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0))
perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]])
- lane_linestring = occupancy_2d.geometries[occupancy_2d.token_to_idx[lane_token]]
+ lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]]
# 1. find intersecting lines, compute 3D distance
intersecting_tokens = occupancy_2d.intersects(perp_linestring)
@@ -75,7 +75,7 @@ def _collect_perpendicular_hits(
perpendicular_hits: List[PerpendicularHit] = []
for intersecting_token in intersecting_tokens:
intersecting_polyline_3d = get_polyline_from_token(polyline_dict, intersecting_token)
- intersecting_linestring = occupancy_2d.geometries[occupancy_2d.token_to_idx[intersecting_token]]
+ intersecting_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[intersecting_token]]
centerline_hit_crossing: bool = (
lane_linestring.intersects(intersecting_linestring) if intersecting_token.startswith("lane_") else False
)
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index 3ea196e3..dd211ca0 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -1,7 +1,6 @@
from __future__ import annotations
-from ast import Dict
+from typing import Dict
-from dataclasses import dataclass
from functools import cached_property
from typing import Union
@@ -16,7 +15,6 @@
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
-@dataclass
class BoundingBoxSE2(ArrayMixin):
"""
Rotated bounding box in 2D defined by center (StateSE2), length and width.
@@ -32,23 +30,67 @@ class BoundingBoxSE2(ArrayMixin):
8.0
"""
- center: StateSE2
- length: float
- width: float
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, center: StateSE2, length: float, width: float):
+ """Initialize BoundingBoxSE2 with center (StateSE2), length and width.
+
+ :param center: Center of the bounding box as a StateSE2 instance.
+ :param length: Length of the bounding box along the x-axis in the local frame.
+ :param width: Width of the bounding box along the y-axis in the local frame.
+ """
+ array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64)
+ array[BoundingBoxSE2Index.SE2] = center.array
+ array[BoundingBoxSE2Index.LENGTH] = length
+ array[BoundingBoxSE2Index.WIDTH] = width
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE2:
- """Create a BoundingBoxSE2 from a numpy array, index by :class:`~d123.geometry.BoundingBoxSE2Index`.
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> BoundingBoxSE2:
+ """Create a BoundingBoxSE2 from a numpy array.
- :param array: A 1D numpy array containing the bounding box parameters.
+ :param array: A 1D numpy array containing the bounding box parameters, indexed by \
+ :class:`~d123.geometry.BoundingBoxSE2Index`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A BoundingBoxSE2 instance.
"""
- assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE2Index)
- return BoundingBoxSE2(
- center=StateSE2.from_array(array[BoundingBoxSE2Index.SE2]),
- length=array[BoundingBoxSE2Index.LENGTH],
- width=array[BoundingBoxSE2Index.WIDTH],
- )
+ assert array.ndim == 1
+ assert array.shape[-1] == len(BoundingBoxSE2Index)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @property
+ def center(self) -> StateSE2:
+ """The center of the bounding box as a StateSE2 instance.
+
+ :return: The center of the bounding box as a StateSE2 instance.
+ """
+ return StateSE2.from_array(self._array[BoundingBoxSE2Index.SE2])
+
+ @property
+ def center_se2(self) -> StateSE2:
+ """The center of the bounding box as a StateSE2 instance.
+
+ :return: The center of the bounding box as a StateSE2 instance.
+ """
+ return self.center
+
+ @property
+ def length(self) -> float:
+ """The length of the bounding box along the x-axis in the local frame.
+
+ :return: The length of the bounding box.
+ """
+ return self._array[BoundingBoxSE2Index.LENGTH]
+
+ @property
+ def width(self) -> float:
+ """The width of the bounding box along the y-axis in the local frame.
+
+ :return: The width of the bounding box.
+ """
+ return self._array[BoundingBoxSE2Index.WIDTH]
@cached_property
def array(self) -> npt.NDArray[np.float64]:
@@ -56,13 +98,7 @@ def array(self) -> npt.NDArray[np.float64]:
:return: A numpy array of shape (5,) containing the bounding box parameters [x, y, yaw, length, width].
"""
- array = np.zeros(len(BoundingBoxSE2Index), dtype=np.float64)
- array[BoundingBoxSE2Index.X] = self.center.x
- array[BoundingBoxSE2Index.Y] = self.center.y
- array[BoundingBoxSE2Index.YAW] = self.center.yaw
- array[BoundingBoxSE2Index.LENGTH] = self.length
- array[BoundingBoxSE2Index.WIDTH] = self.width
- return array
+ return self._array
@cached_property
def shapely_polygon(self) -> geom.Polygon:
@@ -99,7 +135,6 @@ def corners_dict(self) -> Dict[Corners2DIndex, Point2D]:
return {index: Point2D.from_array(corners_array[index]) for index in Corners2DIndex}
-@dataclass
class BoundingBoxSE3(ArrayMixin):
"""
Rotated bounding box in 3D defined by center (StateSE3), length, width and height.
@@ -115,45 +150,86 @@ class BoundingBoxSE3(ArrayMixin):
8.0
"""
- center: StateSE3
- length: float
- width: float
- height: float
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, center: StateSE3, length: float, width: float, height: float):
+ """Initialize BoundingBoxSE3 with center (StateSE3), length, width and height.
+
+ :param center: Center of the bounding box as a StateSE3 instance.
+ :param length: Length of the bounding box along the x-axis in the local frame.
+ :param width: Width of the bounding box along the y-axis in the local frame.
+ :param height: Height of the bounding box along the z-axis in the local frame.
+ """
+ array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64)
+ array[BoundingBoxSE3Index.STATE_SE3] = center.array
+ array[BoundingBoxSE3Index.LENGTH] = length
+ array[BoundingBoxSE3Index.WIDTH] = width
+ array[BoundingBoxSE3Index.HEIGHT] = height
+ object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64]) -> BoundingBoxSE3:
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> BoundingBoxSE3:
"""Create a BoundingBoxSE3 from a numpy array.
:param array: A 1D numpy array containing the bounding box parameters, indexed by \
:class:`~d123.geometry.BoundingBoxSE3Index`.
+ :param copy: Whether to copy the input array. Defaults to True.
:return: A BoundingBoxSE3 instance.
"""
- assert array.ndim == 1 and array.shape[-1] == len(BoundingBoxSE3Index)
- return BoundingBoxSE3(
- center=StateSE3.from_array(array[BoundingBoxSE3Index.STATE_SE3]),
- length=array[BoundingBoxSE3Index.LENGTH],
- width=array[BoundingBoxSE3Index.WIDTH],
- height=array[BoundingBoxSE3Index.HEIGHT],
- )
+ assert array.ndim == 1
+ assert array.shape[-1] == len(BoundingBoxSE3Index)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
- @cached_property
+ @property
+ def center(self) -> StateSE3:
+ """The center of the bounding box as a StateSE3 instance.
+
+ :return: The center of the bounding box as a StateSE3 instance.
+ """
+ return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
+
+ @property
+ def center_se3(self) -> StateSE3:
+ """The center of the bounding box as a StateSE3 instance.
+
+ :return: The center of the bounding box as a StateSE3 instance.
+ """
+ return self.center
+
+ @property
+ def length(self) -> float:
+ """The length of the bounding box along the x-axis in the local frame.
+
+ :return: The length of the bounding box.
+ """
+ return self._array[BoundingBoxSE3Index.LENGTH]
+
+ @property
+ def width(self) -> float:
+ """The width of the bounding box along the y-axis in the local frame.
+
+ :return: The width of the bounding box.
+ """
+ return self._array[BoundingBoxSE3Index.WIDTH]
+
+ @property
+ def height(self) -> float:
+ """The height of the bounding box along the z-axis in the local frame.
+
+ :return: The height of the bounding box.
+ """
+ return self._array[BoundingBoxSE3Index.HEIGHT]
+
+ @property
def array(self) -> npt.NDArray[np.float64]:
"""Convert the BoundingBoxSE3 instance to a numpy array.
:return: A 1D numpy array containing the bounding box parameters, indexed by \
:class:`~d123.geometry.BoundingBoxSE3Index`.
"""
- array = np.zeros(len(BoundingBoxSE3Index), dtype=np.float64)
- array[BoundingBoxSE3Index.X] = self.center.x
- array[BoundingBoxSE3Index.Y] = self.center.y
- array[BoundingBoxSE3Index.Z] = self.center.z
- array[BoundingBoxSE3Index.ROLL] = self.center.roll
- array[BoundingBoxSE3Index.PITCH] = self.center.pitch
- array[BoundingBoxSE3Index.YAW] = self.center.yaw
- array[BoundingBoxSE3Index.LENGTH] = self.length
- array[BoundingBoxSE3Index.WIDTH] = self.width
- array[BoundingBoxSE3Index.HEIGHT] = self.height
- return array
+ return self._array
@property
def bounding_box_se2(self) -> BoundingBoxSE2:
@@ -161,20 +237,13 @@ def bounding_box_se2(self) -> BoundingBoxSE2:
:return: A BoundingBoxSE2 instance.
"""
+ center_se3 = self.center_se3
return BoundingBoxSE2(
- center=StateSE2(self.center.x, self.center.y, self.center.yaw),
+ center=StateSE2(center_se3.x, center_se3.y, center_se3.yaw),
length=self.length,
width=self.width,
)
- @property
- def center_se3(self) -> StateSE3:
- """Returns the center of the bounding box as a StateSE3 instance.
-
- :return: The center of the bounding box as a StateSE3 instance.
- """
- return self.center
-
@property
def shapely_polygon(self) -> geom.Polygon:
"""Return a Shapely polygon representation of the 2D projection of the bounding box.
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index d8df5a1d..c9886a14 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -79,7 +79,7 @@ def geometries(self) -> Sequence[BaseGeometry]:
return self._geometries
@property
- def token_to_idx(self) -> Dict[Union[int, str], int]:
+ def id_to_idx(self) -> Dict[Union[int, str], int]:
"""Mapping from geometry IDs to indices in the occupancy map.
:return: dictionary of IDs and indices
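
The rename from token_to_idx to id_to_idx matches the map's generic int-or-string IDs; call sites index geometries as before:

    import shapely.geometry as geom

    from d123.geometry import OccupancyMap2D

    occ = OccupancyMap2D([geom.Point(0.0, 0.0).buffer(1.0)], ids=["ego"])
    idx = occ.id_to_idx["ego"]  # previously occ.token_to_idx["ego"]
    assert occ.geometries[idx].equals(occ["ego"])
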
diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py
new file mode 100644
index 00000000..545c511c
--- /dev/null
+++ b/d123/geometry/test/test_bounding_box.py
@@ -0,0 +1,215 @@
+import unittest
+
+import numpy as np
+import shapely.geometry as geom
+
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3
+from d123.geometry.geometry_index import (
+ BoundingBoxSE2Index,
+ BoundingBoxSE3Index,
+ Corners2DIndex,
+ Corners3DIndex,
+ Point2DIndex,
+)
+
+
+class TestBoundingBoxSE2(unittest.TestCase):
+ """Unit tests for BoundingBoxSE2 class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.center = StateSE2(1.0, 2.0, 0.5)
+ self.length = 4.0
+ self.width = 2.0
+ self.bbox = BoundingBoxSE2(self.center, self.length, self.width)
+
+ def test_init(self):
+ """Test BoundingBoxSE2 initialization."""
+ bbox = BoundingBoxSE2(self.center, self.length, self.width)
+ self.assertEqual(bbox.length, self.length)
+ self.assertEqual(bbox.width, self.width)
+ np.testing.assert_array_equal(bbox.center.array, self.center.array)
+
+ def test_from_array(self):
+ """Test BoundingBoxSE2.from_array method."""
+ array = np.array([1.0, 2.0, 0.5, 4.0, 2.0])
+ bbox = BoundingBoxSE2.from_array(array)
+ np.testing.assert_array_equal(bbox.array, array)
+
+ def test_from_array_copy(self):
+ """Test BoundingBoxSE2.from_array with copy parameter."""
+ array = np.array([1.0, 2.0, 0.5, 4.0, 2.0])
+ bbox_copy = BoundingBoxSE2.from_array(array, copy=True)
+ bbox_no_copy = BoundingBoxSE2.from_array(array, copy=False)
+
+ array[0] = 999.0
+ self.assertNotEqual(bbox_copy.array[0], 999.0)
+ self.assertEqual(bbox_no_copy.array[0], 999.0)
+
+ def test_properties(self):
+ """Test BoundingBoxSE2 properties."""
+ self.assertEqual(self.bbox.length, self.length)
+ self.assertEqual(self.bbox.width, self.width)
+ np.testing.assert_array_equal(self.bbox.center.array, self.center.array)
+ np.testing.assert_array_equal(self.bbox.center_se2.array, self.center.array)
+
+ def test_array_property(self):
+ """Test array property."""
+ expected = np.array([1.0, 2.0, 0.5, 4.0, 2.0])
+ np.testing.assert_array_equal(self.bbox.array, expected)
+
+ def test_array_mixin(self):
+ """Test that BoundingBoxSE2 is an instance of ArrayMixin."""
+ self.assertIsInstance(self.bbox, ArrayMixin)
+
+ expected = np.array([1.0, 2.0, 0.5, 4.0, 2.0], dtype=np.float16)
+ output_array = np.array(self.bbox, dtype=np.float16)
+ np.testing.assert_array_equal(output_array, expected)
+ self.assertEqual(output_array.dtype, np.float16)
+ self.assertEqual(output_array.shape, (len(BoundingBoxSE2Index),))
+
+ def test_bounding_box_se2_property(self):
+ """Test bounding_box_se2 property returns self."""
+ self.assertIs(self.bbox.bounding_box_se2, self.bbox)
+
+ def test_corners_array(self):
+ """Test corners_array property."""
+ corners = self.bbox.corners_array
+ self.assertEqual(corners.shape, (len(Corners2DIndex), len(Point2DIndex)))
+ self.assertIsInstance(corners, np.ndarray)
+
+ def test_corners_dict(self):
+ """Test corners_dict property."""
+ corners_dict = self.bbox.corners_dict
+ self.assertEqual(len(corners_dict), len(Corners2DIndex))
+ for index in Corners2DIndex:
+ self.assertIn(index, corners_dict)
+ self.assertIsInstance(corners_dict[index], Point2D)
+
+ def test_shapely_polygon(self):
+ """Test shapely_polygon property."""
+ polygon = self.bbox.shapely_polygon
+ self.assertIsInstance(polygon, geom.Polygon)
+ self.assertAlmostEqual(polygon.area, self.length * self.width)
+
+ def test_array_assertions(self):
+ """Test array assertions in from_array."""
+ # Test 2D array
+ with self.assertRaises(AssertionError):
+ BoundingBoxSE2.from_array(np.array([[1, 2, 3, 4, 5]]))
+
+ # Test wrong size
+ with self.assertRaises(AssertionError):
+ BoundingBoxSE2.from_array(np.array([1, 2, 3, 4]))
+
+
+class TestBoundingBoxSE3(unittest.TestCase):
+ """Unit tests for BoundingBoxSE3 class."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ self.center = StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
+ self.length = 4.0
+ self.width = 2.0
+ self.height = 1.5
+ self.bbox = BoundingBoxSE3(self.center, self.length, self.width, self.height)
+
+ def test_init(self):
+ """Test BoundingBoxSE3 initialization."""
+ bbox = BoundingBoxSE3(self.center, self.length, self.width, self.height)
+ self.assertEqual(bbox.length, self.length)
+ self.assertEqual(bbox.width, self.width)
+ self.assertEqual(bbox.height, self.height)
+ np.testing.assert_array_equal(bbox.center.array, self.center.array)
+
+ def test_from_array(self):
+ """Test BoundingBoxSE3.from_array method."""
+ array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ bbox = BoundingBoxSE3.from_array(array)
+ np.testing.assert_array_equal(bbox.array, array)
+
+ def test_from_array_copy(self):
+ """Test BoundingBoxSE3.from_array with copy parameter."""
+ array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ bbox_copy = BoundingBoxSE3.from_array(array, copy=True)
+ bbox_no_copy = BoundingBoxSE3.from_array(array, copy=False)
+
+ array[0] = 999.0
+ self.assertNotEqual(bbox_copy.array[0], 999.0)
+ self.assertEqual(bbox_no_copy.array[0], 999.0)
+
+ def test_properties(self):
+ """Test BoundingBoxSE3 properties."""
+ self.assertEqual(self.bbox.length, self.length)
+ self.assertEqual(self.bbox.width, self.width)
+ self.assertEqual(self.bbox.height, self.height)
+ np.testing.assert_array_equal(self.bbox.center.array, self.center.array)
+ np.testing.assert_array_equal(self.bbox.center_se3.array, self.center.array)
+
+ def test_array_property(self):
+ """Test array property."""
+ expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ np.testing.assert_array_equal(self.bbox.array, expected)
+
+ def test_array_mixin(self):
+ """Test that BoundingBoxSE3 is an instance of ArrayMixin."""
+ self.assertIsInstance(self.bbox, ArrayMixin)
+
+ expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5], dtype=np.float16)
+ output_array = np.array(self.bbox, dtype=np.float16)
+ np.testing.assert_array_equal(output_array, expected)
+ self.assertEqual(output_array.dtype, np.float16)
+ self.assertEqual(output_array.shape, (len(BoundingBoxSE3Index),))
+
+ def test_bounding_box_se2_property(self):
+ """Test bounding_box_se2 property."""
+ bbox_2d = self.bbox.bounding_box_se2
+ self.assertIsInstance(bbox_2d, BoundingBoxSE2)
+ self.assertEqual(bbox_2d.length, self.length)
+ self.assertEqual(bbox_2d.width, self.width)
+ self.assertEqual(bbox_2d.center.x, self.center.x)
+ self.assertEqual(bbox_2d.center.y, self.center.y)
+ self.assertEqual(bbox_2d.center.yaw, self.center.yaw)
+
+ def test_corners_array(self):
+ """Test corners_array property."""
+ corners = self.bbox.corners_array
+ self.assertEqual(corners.shape, (8, 3))
+ self.assertIsInstance(corners, np.ndarray)
+
+ def test_corners_dict(self):
+ """Test corners_dict property."""
+ corners_dict = self.bbox.corners_dict
+ self.assertEqual(len(corners_dict), 8)
+ for index in Corners3DIndex:
+ self.assertIn(index, corners_dict)
+ self.assertIsInstance(corners_dict[index], Point3D)
+
+ def test_shapely_polygon(self):
+ """Test shapely_polygon property."""
+ polygon = self.bbox.shapely_polygon
+ self.assertIsInstance(polygon, geom.Polygon)
+ self.assertAlmostEqual(polygon.area, self.length * self.width)
+
+ def test_array_assertions(self):
+ """Test array assertions in from_array."""
+ # Test 2D array
+ with self.assertRaises(AssertionError):
+ BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]]))
+
+ # Test wrong size
+ with self.assertRaises(AssertionError):
+ BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8]))
+
+ def test_zero_dimensions(self):
+ """Test bounding box with zero dimensions."""
+ center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0)
+ self.assertEqual(bbox.length, 0.0)
+ self.assertEqual(bbox.width, 0.0)
+ self.assertEqual(bbox.height, 0.0)
+
+
+if __name__ == "__main__":
+ unittest.main()
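
The np.array(...) round trips in test_array_mixin rely on ArrayMixin exposing the NumPy conversion protocol; a minimal sketch of what d123.common.utils.mixin presumably provides (the real implementation may carry more):

    import numpy as np
    import numpy.typing as npt

    class ArrayMixin:
        """Sketch: adapt an _array-backed object to np.array(obj, dtype=...)."""

        _array: npt.NDArray[np.float64]

        def __array__(self, dtype=None, copy=None) -> np.ndarray:
            # np.array(obj) and np.asarray(obj) route through this hook; the
            # float16 conversion in the tests exercises the dtype branch.
            return self._array if dtype is None else self._array.astype(dtype)
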
diff --git a/d123/geometry/test/test_occupancy_map.py b/d123/geometry/test/test_occupancy_map.py
new file mode 100644
index 00000000..7344f4de
--- /dev/null
+++ b/d123/geometry/test/test_occupancy_map.py
@@ -0,0 +1,284 @@
+import unittest
+
+import numpy as np
+import shapely.geometry as geom
+
+from d123.geometry import OccupancyMap2D
+
+
+class TestOccupancyMap2D(unittest.TestCase):
+ """Unit tests for OccupancyMap2D class."""
+
+ def setUp(self):
+ """Set up test fixtures with various geometries."""
+ self.square1 = geom.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
+ self.square2 = geom.Polygon([(3, 3), (5, 3), (5, 5), (3, 5)])
+ self.circle = geom.Point(1, 1).buffer(0.5)
+ self.line = geom.LineString([(0, 0), (1, 1)])
+
+ self.geometries = [self.square1, self.square2, self.circle, self.line]
+ self.string_ids = ["square1", "square2", "circle", "line"]
+ self.int_ids = [1, 2, 3, 4]
+
+ def test_init_with_default_ids(self):
+ """Test initialization with default string IDs."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ self.assertEqual(len(occ_map), 4)
+ self.assertEqual(occ_map.ids, ["0", "1", "2", "3"])
+ self.assertEqual(len(occ_map.geometries), 4)
+
+ def test_init_with_string_ids(self):
+ """Test initialization with custom string IDs."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ self.assertEqual(len(occ_map), 4)
+ self.assertEqual(occ_map.ids, self.string_ids)
+ self.assertEqual(occ_map["square1"], self.square1)
+
+ def test_init_with_int_ids(self):
+ """Test initialization with integer IDs."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.int_ids)
+
+ self.assertEqual(len(occ_map), 4)
+ self.assertEqual(occ_map.ids, self.int_ids)
+ self.assertEqual(occ_map[1], self.square1)
+
+ def test_init_with_mismatched_ids_length(self):
+ """Test that initialization fails with mismatched IDs length."""
+ with self.assertRaises(AssertionError):
+ OccupancyMap2D(self.geometries, ids=["id1", "id2"])
+
+ def test_init_with_custom_node_capacity(self):
+ """Test initialization with custom node capacity."""
+ occ_map = OccupancyMap2D(self.geometries, node_capacity=5)
+ self.assertEqual(occ_map._node_capacity, 5)
+
+ def test_from_dict_constructor(self):
+ """Test construction from dictionary."""
+ geometry_dict = {"square": self.square1, "circle": self.circle, "line": self.line}
+
+ occ_map = OccupancyMap2D.from_dict(geometry_dict)
+
+ self.assertEqual(len(occ_map), 3)
+ self.assertEqual(set(occ_map.ids), set(["square", "circle", "line"]))
+ self.assertEqual(occ_map["square"], self.square1)
+
+ def test_getitem_string_id(self):
+ """Test geometry retrieval by string ID."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ self.assertEqual(occ_map["square1"], self.square1)
+ self.assertEqual(occ_map["circle"], self.circle)
+
+ def test_getitem_int_id(self):
+ """Test geometry retrieval by integer ID."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.int_ids)
+
+ self.assertEqual(occ_map[1], self.square1)
+ self.assertEqual(occ_map[3], self.circle)
+
+ def test_getitem_invalid_id(self):
+ """Test that invalid ID raises KeyError."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ with self.assertRaises(KeyError):
+ _ = occ_map["nonexistent"]
+
+ def test_len(self):
+ """Test length property."""
+ occ_map = OccupancyMap2D(self.geometries)
+ self.assertEqual(len(occ_map), 4)
+
+ empty_map = OccupancyMap2D([])
+ self.assertEqual(len(empty_map), 0)
+
+ def test_ids_property(self):
+ """Test IDs property getter."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+ self.assertEqual(occ_map.ids, self.string_ids)
+
+ def test_geometries_property(self):
+ """Test geometries property getter."""
+ occ_map = OccupancyMap2D(self.geometries)
+ self.assertEqual(list(occ_map.geometries), self.geometries)
+
+ def test_id_to_idx_property(self):
+ """Test id_to_idx property."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+ expected_mapping = {"square1": 0, "square2": 1, "circle": 2, "line": 3}
+ self.assertEqual(occ_map.id_to_idx, expected_mapping)
+
+ def test_intersects_with_overlapping_geometry(self):
+ """Test intersects method with overlapping geometry."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ # Create a geometry that intersects with square1 and circle
+ query_geom = geom.Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)])
+ intersecting_ids = occ_map.intersects(query_geom)
+
+ # NOTE: square2 does not intersect with the query geometry; the rest do.
+ self.assertIn("square1", intersecting_ids)
+ self.assertIn("circle", intersecting_ids)
+ self.assertIn("line", intersecting_ids)
+ self.assertEqual(len(intersecting_ids), 3)
+
+ def test_intersects_with_non_overlapping_geometry(self):
+ """Test intersects method with non-overlapping geometry."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ # Create a geometry that doesn't intersect with any
+ query_geom = geom.Polygon([(10, 10), (12, 10), (12, 12), (10, 12)])
+ intersecting_ids = occ_map.intersects(query_geom)
+
+ self.assertEqual(len(intersecting_ids), 0)
+
+ def test_query_with_intersects_predicate(self):
+ """Test query method with intersects predicate."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ query_geom = geom.Point(1, 1)
+ indices = occ_map.query(query_geom, predicate="intersects")
+ self.assertIsInstance(indices, np.ndarray)
+ self.assertIn(occ_map.id_to_idx["square1"], indices)
+ self.assertIn(occ_map.id_to_idx["circle"], indices)
+ self.assertIn(occ_map.id_to_idx["line"], indices)
+ self.assertNotIn(occ_map.id_to_idx["square2"], indices)
+
+ def test_query_with_within_predicate(self):
+ """Test query method with the within predicate."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ query_geom = geom.Point(4, 4)
+ indices = occ_map.query(query_geom, predicate="within")
+
+ self.assertIsInstance(indices, np.ndarray)
+ self.assertIn(occ_map.id_to_idx["square2"], indices)
+ self.assertNotIn(occ_map.id_to_idx["square1"], indices)
+ self.assertNotIn(occ_map.id_to_idx["circle"], indices)
+ self.assertNotIn(occ_map.id_to_idx["line"], indices)
+
+ def test_query_with_distance(self):
+ """Test query method with distance parameter."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ query_geom = geom.Point(4, 4)
+ indices = occ_map.query(query_geom, predicate="dwithin", distance=3.0)
+
+ self.assertIsInstance(indices, np.ndarray)
+ self.assertIn(occ_map.id_to_idx["square2"], indices)
+ self.assertIn(occ_map.id_to_idx["square1"], indices)
+ self.assertNotIn(occ_map.id_to_idx["circle"], indices)
+ self.assertNotIn(occ_map.id_to_idx["line"], indices)
+
+ def test_query_nearest_basic(self):
+ """Test query_nearest method basic functionality."""
+ occ_map = OccupancyMap2D(self.geometries, ids=self.string_ids)
+
+ query_geom = geom.Point(4, 4)
+ nearest_indices = occ_map.query_nearest(query_geom)
+
+ self.assertIsInstance(nearest_indices, np.ndarray)
+
+ def test_query_nearest_with_distance(self):
+ """Test query_nearest method with return_distance=True."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ query_geom = geom.Point(1, 1)
+ result = occ_map.query_nearest(query_geom, return_distance=True)
+
+ self.assertIsInstance(result, tuple)
+ self.assertEqual(len(result), 2)
+ indices, distances = result
+ self.assertIsInstance(indices, np.ndarray)
+ self.assertIsInstance(distances, np.ndarray)
+
+ def test_query_nearest_with_max_distance(self):
+ """Test query_nearest method with max_distance."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ query_geom = geom.Point(10, 10)
+
+ nearest_indices = occ_map.query_nearest(query_geom, max_distance=1.0)
+ self.assertIsInstance(nearest_indices, np.ndarray)
+ self.assertEqual(len(nearest_indices), 0)
+
+ nearest_indices = occ_map.query_nearest(query_geom, max_distance=10.0)
+ self.assertIsInstance(nearest_indices, np.ndarray)
+ self.assertTrue(len(nearest_indices) > 0)
+
+ def test_contains_vectorized_single_point(self):
+ """Test contains_vectorized with a single point."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ points = np.array([[1.0, 1.0]]) # Point inside square1 and circle
+ result = occ_map.contains_vectorized(points)
+
+ self.assertEqual(result.shape, (4, 1))
+ self.assertIsInstance(result, np.ndarray)
+ self.assertEqual(result.dtype, bool)
+
+ def test_contains_vectorized_multiple_points(self):
+ """Test contains_vectorized with multiple points."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ points = np.array(
+ [
+ [1.0, 1.0], # Inside square1 and circle
+ [4.0, 4.0], # Inside square2
+ [10.0, 10.0], # Outside all geometries
+ ]
+ )
+ result = occ_map.contains_vectorized(points)
+
+ self.assertEqual(result.shape, (4, 3))
+ self.assertIsInstance(result, np.ndarray)
+ self.assertEqual(result.dtype, bool)
+
+ # Check specific containment results
+ # Point [1.0, 1.0] should be in square1 (index 0) and circle (index 2)
+ self.assertTrue(result[0, 0]) # square1 contains point 0
+ self.assertFalse(result[1, 0]) # square2 does not contain point 0
+ self.assertTrue(result[2, 0]) # circle contains point 0
+ self.assertFalse(result[3, 0]) # line does not contain point 0
+
+ # Point [4.0, 4.0] should be in square2 (index 1) only
+ self.assertFalse(result[0, 1]) # square1 does not contain point 1
+ self.assertTrue(result[1, 1]) # square2 contains point 1
+ self.assertFalse(result[2, 1]) # circle does not contain point 1
+ self.assertFalse(result[3, 1]) # line does not contain point 1
+
+ # Point [10.0, 10.0] should not be in any geometry
+ self.assertFalse(result[0, 2]) # square1 does not contain point 2
+ self.assertFalse(result[1, 2]) # square2 does not contain point 2
+ self.assertFalse(result[2, 2]) # circle does not contain point 2
+ self.assertFalse(result[3, 2]) # line does not contain point 2
+
+ def test_contains_vectorized_empty_points(self):
+ """Test contains_vectorized with empty points array."""
+ occ_map = OccupancyMap2D(self.geometries)
+
+ points = np.empty((0, 2))
+ result = occ_map.contains_vectorized(points)
+
+ self.assertEqual(result.shape, (4, 0))
+
+ def test_empty_occupancy_map(self):
+ """Test behavior with empty geometry list."""
+ occ_map = OccupancyMap2D([])
+
+ self.assertEqual(len(occ_map), 0)
+ self.assertEqual(occ_map.ids, [])
+ self.assertEqual(len(occ_map.geometries), 0)
+
+ def test_single_geometry_map(self):
+ """Test behavior with single geometry."""
+ occ_map = OccupancyMap2D([self.square1], ids=["single"])
+
+ self.assertEqual(len(occ_map), 1)
+ self.assertEqual(occ_map.ids, ["single"])
+ self.assertEqual(occ_map["single"], self.square1)
+
+
+if __name__ == "__main__":
+ unittest.main()
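
The (geometries x points) orientation of contains_vectorized is easy to get backwards; a compact reminder, using the same import path as the tests:

    import numpy as np
    import shapely.geometry as geom

    from d123.geometry import OccupancyMap2D

    occ = OccupancyMap2D([geom.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])], ids=["sq"])
    mask = occ.contains_vectorized(np.array([[1.0, 1.0], [5.0, 5.0]]))
    assert mask.shape == (1, 2)  # rows: geometries, columns: query points
    assert mask[0, 0] and not mask[0, 1]
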
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 9052f79b..32328cbf 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -250,7 +250,7 @@
" return fig, ax\n",
"\n",
"\n",
- "scene_index = 3\n",
+ "scene_index = 0\n",
"iteration = 99\n",
"fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
"plt.show()\n",
diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb
index 07d91d1d..f2371058 100644
--- a/notebooks/viz/viser_testing_v2_scene.ipynb
+++ b/notebooks/viz/viser_testing_v2_scene.ipynb
@@ -28,9 +28,9 @@
"\n",
"\n",
"# splits = [\"nuplan_private_test\"]\n",
- "splits = [\"carla\"]\n",
+ "# splits = [\"carla\"]\n",
"# splits = [\"wopd_train\"]\n",
- "# splits = [\"av2-sensor-mini_train\"]\n",
+ "splits = [\"av2-sensor-mini_train\"]\n",
"log_names = None\n",
"\n",
"scene_tokens = None\n",
@@ -99,7 +99,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "d123",
+ "display_name": "d123_dev",
"language": "python",
"name": "python3"
},
From 7a9775da4e5007aa011fcd27b9a454a47cd22053 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Mon, 25 Aug 2025 18:44:10 +0200
Subject: [PATCH 018/145] Refactor both transform se2 and se3. NOTE: Both are
untested
---
.../vehicle_state/vehicle_parameters.py | 8 +-
.../visualization/matplotlib/camera copy.py | 329 ------------------
.../common/visualization/matplotlib/camera.py | 2 +-
.../visualization/matplotlib/observation.py | 9 +-
d123/common/visualization/viser/utils.py | 7 +-
d123/common/visualization/viser/utils_v2.py | 2 +-
.../wopd/wopd_data_converter.py | 2 +-
d123/geometry/geometry_index.py | 8 +
d123/geometry/se.py | 2 +-
d123/geometry/transform/rotation.py | 27 --
d123/geometry/transform/se3.py | 179 ----------
d123/geometry/transform/tranform_2d.py | 22 --
.../{se2_array.py => transform_se2.py} | 91 ++++-
d123/geometry/transform/transform_se3.py | 177 ++++++++++
d123/simulation/agents/smart_agents.py | 2 +-
.../gym_observation/raster/raster_renderer.py | 2 +-
.../feature_builder/smart_feature_builder.py | 2 +-
17 files changed, 285 insertions(+), 586 deletions(-)
delete mode 100644 d123/common/visualization/matplotlib/camera copy.py
delete mode 100644 d123/geometry/transform/rotation.py
delete mode 100644 d123/geometry/transform/se3.py
delete mode 100644 d123/geometry/transform/tranform_2d.py
rename d123/geometry/transform/{se2_array.py => transform_se2.py} (51%)
create mode 100644 d123/geometry/transform/transform_se3.py
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index c8a84828..19ff334b 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
from dataclasses import dataclass
from d123.geometry import StateSE2, StateSE3, Vector2D
-from d123.geometry.transform.se3 import translate_se3_along_x, translate_se3_along_z
-from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
+from d123.geometry.transform.transform_se3 import translate_se3_along_x, translate_se3_along_z
# TODO: Add more vehicle parameters, potentially extend the parameters.
@@ -115,7 +115,7 @@ def center_se2_to_rear_axle_se2(center_se2: StateSE2, vehicle_parameters: Vehicl
:param vehicle_parameters: The vehicle parameters.
:return: The rear axle state in 2D.
"""
- return translate_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0))
+ return translate_se2_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0))
def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: VehicleParameters) -> StateSE2:
@@ -125,4 +125,4 @@ def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: Veh
:param vehicle_parameters: The vehicle parameters.
:return: The center state in 2D.
"""
- return translate_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0))
+ return translate_se2_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0))
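
The two renamed helpers remain exact inverses of each other, since translating along the unchanged yaw by +d then -d cancels; a quick round-trip check, using the KITTI-360 parameters added earlier in this series:

    import numpy as np

    from d123.common.datatypes.vehicle_state.vehicle_parameters import (
        center_se2_to_rear_axle_se2,
        get_kitti360_station_wagon_parameters,
        rear_axle_se2_to_center_se2,
    )
    from d123.geometry import StateSE2

    params = get_kitti360_station_wagon_parameters()
    center = StateSE2(10.0, 5.0, 0.3)
    rear_axle = center_se2_to_rear_axle_se2(center, params)
    np.testing.assert_allclose(rear_axle_se2_to_center_se2(rear_axle, params).array, center.array)
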
diff --git a/d123/common/visualization/matplotlib/camera copy.py b/d123/common/visualization/matplotlib/camera copy.py
deleted file mode 100644
index ed3a7d2a..00000000
--- a/d123/common/visualization/matplotlib/camera copy.py
+++ /dev/null
@@ -1,329 +0,0 @@
-# from typing import List, Optional, Tuple
-
-from typing import List, Optional, Tuple
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import numpy.typing as npt
-
-# from PIL import ImageColor
-from pyquaternion import Quaternion
-
-from d123.common.datatypes.detection.detection import BoxDetectionSE3, BoxDetectionWrapper
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import Camera
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
-from d123.geometry import BoundingBoxSE3Index, Corners3DIndex, StateSE3
-from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array, get_rotation_matrix
-
-# from navsim.common.dataclasses import Annotations, Camera, Lidar
-# from navsim.common.enums import BoundingBoxIndex, LidarIndex
-# from navsim.planning.scenario_builder.navsim_scenario_utils import tracked_object_types
-# from navsim.visualization.config import AGENT_CONFIG
-# from navsim.visualization.lidar import filter_lidar_pc, get_lidar_pc_color
-
-
-def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes:
- """
- Adds camera image to matplotlib ax object
- :param ax: matplotlib ax object
- :param camera: navsim camera dataclass
- :return: ax object with image
- """
- ax.imshow(camera.image)
- return ax
-
-
-# FIXME:
-# def add_lidar_to_camera_ax(ax: plt.Axes, camera: Camera, lidar: Lidar) -> plt.Axes:
-# """
-# Adds camera image with lidar point cloud on matplotlib ax object
-# :param ax: matplotlib ax object
-# :param camera: navsim camera dataclass
-# :param lidar: navsim lidar dataclass
-# :return: ax object with image
-# """
-
-# image, lidar_pc = camera.image.copy(), lidar.lidar_pc.copy()
-# image_height, image_width = image.shape[:2]
-
-# lidar_pc = filter_lidar_pc(lidar_pc)
-# lidar_pc_colors = np.array(get_lidar_pc_color(lidar_pc))
-
-# pc_in_cam, pc_in_fov_mask = _transform_pcs_to_images(
-# lidar_pc,
-# camera.sensor2lidar_rotation,
-# camera.sensor2lidar_translation,
-# camera.intrinsics,
-# img_shape=(image_height, image_width),
-# )
-
-# for (x, y), color in zip(pc_in_cam[pc_in_fov_mask], lidar_pc_colors[pc_in_fov_mask]):
-# color = (int(color[0]), int(color[1]), int(color[2]))
-# cv2.circle(image, (int(x), int(y)), 5, color, -1)
-
-# ax.imshow(image)
-# return ax
-
-
-def add_box_detections_to_camera_ax(
- ax: plt.Axes,
- camera: Camera,
- box_detections: BoxDetectionWrapper,
- ego_state_se3: EgoStateSE3,
-) -> plt.Axes:
-
- box_detection_array = np.zeros((len(box_detections.box_detections), len(BoundingBoxSE3Index)), dtype=np.float64)
- detection_types = np.array(
- [detection.metadata.detection_type for detection in box_detections.box_detections], dtype=object
- )
- for idx, box_detection in enumerate(box_detections.box_detections):
- assert isinstance(
- box_detection, BoxDetectionSE3
- ), f"Box detection must be of type BoxDetectionSE3, got {type(box_detection)}"
- box_detection_array[idx] = box_detection.bounding_box_se3.array
-
- box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_se3_array(
- ego_state_se3.rear_axle_se3, box_detection_array[..., BoundingBoxSE3Index.STATE_SE3]
- )
- detection_positions, detection_extents, detection_yaws = _transform_annotations_to_camera(
- box_detection_array, camera.extrinsic
- )
-
- corners_norm = np.stack(np.unravel_index(np.arange(len(Corners3DIndex)), [2] * 3), axis=1)
- corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
- corners_norm = corners_norm - np.array([0.5, 0.5, 0.5])
- corners = detection_extents.reshape([-1, 1, 3]) * corners_norm.reshape([1, 8, 3])
-
- corners = _rotation_3d_in_axis(corners, detection_yaws, axis=1)
- corners += detection_positions.reshape(-1, 1, 3)
-
- # Then draw project corners to image.
- box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsic)
- box_corners = box_corners.reshape(-1, 8, 2)
- corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8)
- valid_corners = corners_pc_in_fov.any(-1)
-
- box_corners, detection_types = box_corners[valid_corners], detection_types[valid_corners]
- image = _plot_rect_3d_on_img(camera.image.copy(), box_corners, detection_types)
-
- ax.imshow(image)
- return ax
-
-
-def _transform_annotations_to_camera(
- boxes: npt.NDArray[np.float32], extrinsic: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float32]:
- """
- Helper function to transform bounding boxes into camera frame
- TODO: Refactor
- :param boxes: array representation of bounding boxes
- :param sensor2lidar_rotation: camera rotation
- :param sensor2lidar_translation: camera translation
- :return: bounding boxes in camera coordinates
- """
- sensor2ego_rotation = extrinsic[:3, :3]
- sensor2ego_translation = extrinsic[:3, 3]
-
- locs, rots = (
- boxes[:, BoundingBoxSE3Index.XYZ],
- boxes[:, BoundingBoxSE3Index.YAW],
- )
- dims_cam = boxes[
- :, [BoundingBoxSE3Index.LENGTH, BoundingBoxSE3Index.HEIGHT, BoundingBoxSE3Index.WIDTH]
- ] # l, w, h -> l, h, w
-
- rots_cam = np.zeros_like(rots)
- for idx, state_se3_array in enumerate(boxes[:, BoundingBoxSE3Index.STATE_SE3]):
- rot = Quaternion(matrix=get_rotation_matrix(StateSE3.from_array(state_se3_array)))
- rot = Quaternion(matrix=sensor2ego_rotation).inverse * rot
- rots_cam[idx] = -rot.yaw_pitch_roll[0]
-
- lidar2cam_r = np.linalg.inv(sensor2ego_rotation)
- lidar2cam_t = sensor2ego_translation @ lidar2cam_r.T
- lidar2cam_rt = np.eye(4)
- lidar2cam_rt[:3, :3] = lidar2cam_r.T
- lidar2cam_rt[3, :3] = -lidar2cam_t
-
- locs_cam = np.concatenate([locs, np.ones_like(locs)[:, :1]], -1) # -1, 4
- locs_cam = lidar2cam_rt.T @ locs_cam.T
- locs_cam = locs_cam.T
- locs_cam = locs_cam[:, :-1]
- return locs_cam, dims_cam, rots_cam
-
-
-def _rotation_3d_in_axis(points: npt.NDArray[np.float32], angles: npt.NDArray[np.float32], axis: int = 0):
- """
- Rotate 3D points by angles according to axis.
- TODO: Refactor
- :param points: array of points
- :param angles: array of angles
- :param axis: axis to perform rotation, defaults to 0
- :raises value: _description_
- :raises ValueError: if axis invalid
- :return: rotated points
- """
- rot_sin = np.sin(angles)
- rot_cos = np.cos(angles)
- ones = np.ones_like(rot_cos)
- zeros = np.zeros_like(rot_cos)
- if axis == 1:
- rot_mat_T = np.stack(
- [
- np.stack([rot_cos, zeros, -rot_sin]),
- np.stack([zeros, ones, zeros]),
- np.stack([rot_sin, zeros, rot_cos]),
- ]
- )
- elif axis == 2 or axis == -1:
- rot_mat_T = np.stack(
- [
- np.stack([rot_cos, -rot_sin, zeros]),
- np.stack([rot_sin, rot_cos, zeros]),
- np.stack([zeros, zeros, ones]),
- ]
- )
- elif axis == 0:
- rot_mat_T = np.stack(
- [
- np.stack([zeros, rot_cos, -rot_sin]),
- np.stack([zeros, rot_sin, rot_cos]),
- np.stack([ones, zeros, zeros]),
- ]
- )
- else:
- raise ValueError(f"axis should in range [0, 1, 2], got {axis}")
- return np.einsum("aij,jka->aik", points, rot_mat_T)
-
-
-def _plot_rect_3d_on_img(
- image: npt.NDArray[np.float32],
- box_corners: npt.NDArray[np.float32],
- detection_types: List[DetectionType],
- thickness: int = 1,
-) -> npt.NDArray[np.uint8]:
- """
- Plot the boundary lines of 3D rectangular on 2D images.
- TODO: refactor
- :param image: The numpy array of image.
- :param box_corners: Coordinates of the corners of 3D, shape of [N, 8, 2].
- :param box_labels: labels of boxes for coloring
- :param thickness: pixel width of liens, defaults to 3
- :return: image with 3D bounding boxes
- """
- line_indices = (
- (0, 1),
- (0, 3),
- (0, 4),
- (1, 2),
- (1, 5),
- (3, 2),
- (3, 7),
- (4, 5),
- (4, 7),
- (2, 6),
- (5, 6),
- (6, 7),
- )
- for i in range(len(box_corners)):
- color = BOX_DETECTION_CONFIG[detection_types[i]].fill_color.rgb
- corners = box_corners[i].astype(np.int64)
- for start, end in line_indices:
- cv2.line(
- image,
- (corners[start, 0], corners[start, 1]),
- (corners[end, 0], corners[end, 1]),
- color,
- thickness,
- cv2.LINE_AA,
- )
- return image.astype(np.uint8)
-
-
-def _transform_points_to_image(
- points: npt.NDArray[np.float32],
- intrinsic: npt.NDArray[np.float32],
- image_shape: Optional[Tuple[int, int]] = None,
- eps: float = 1e-3,
-) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.bool_]]:
- """
- Transforms points in camera frame to image pixel coordinates
- TODO: refactor
- :param points: points in camera frame
- :param intrinsic: camera intrinsics
- :param image_shape: shape of image in pixel
- :param eps: lower threshold of points, defaults to 1e-3
- :return: points in pixel coordinates, mask of values in frame
- """
- points = points[:, :3]
-
- viewpad = np.eye(4)
- viewpad[: intrinsic.shape[0], : intrinsic.shape[1]] = intrinsic
-
- pc_img = np.concatenate([points, np.ones_like(points)[:, :1]], -1)
- pc_img = viewpad @ pc_img.T
- pc_img = pc_img.T
-
- cur_pc_in_fov = pc_img[:, 2] > eps
- pc_img = pc_img[..., 0:2] / np.maximum(pc_img[..., 2:3], np.ones_like(pc_img[..., 2:3]) * eps)
- if image_shape is not None:
- img_h, img_w = image_shape
- cur_pc_in_fov = (
- cur_pc_in_fov
- & (pc_img[:, 0] < (img_w - 1))
- & (pc_img[:, 0] > 0)
- & (pc_img[:, 1] < (img_h - 1))
- & (pc_img[:, 1] > 0)
- )
- return pc_img, cur_pc_in_fov
-
-
-# def _transform_pcs_to_images(
-# lidar_pc: npt.NDArray[np.float32],
-# sensor2lidar_rotation: npt.NDArray[np.float32],
-# sensor2lidar_translation: npt.NDArray[np.float32],
-# intrinsic: npt.NDArray[np.float32],
-# img_shape: Optional[Tuple[int, int]] = None,
-# eps: float = 1e-3,
-# ) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.bool_]]:
-# """
-# Transforms points in camera frame to image pixel coordinates
-# TODO: refactor
-# :param lidar_pc: lidar point cloud
-# :param sensor2lidar_rotation: camera rotation
-# :param sensor2lidar_translation: camera translation
-# :param intrinsic: camera intrinsics
-# :param img_shape: image shape in pixels, defaults to None
-# :param eps: threshold for lidar pc height, defaults to 1e-3
-# :return: lidar pc in pixel coordinates, mask of values in frame
-# """
-# pc_xyz = lidar_pc[LidarIndex.POSITION, :].T
-
-# lidar2cam_r = np.linalg.inv(sensor2lidar_rotation)
-# lidar2cam_t = sensor2lidar_translation @ lidar2cam_r.T
-# lidar2cam_rt = np.eye(4)
-# lidar2cam_rt[:3, :3] = lidar2cam_r.T
-# lidar2cam_rt[3, :3] = -lidar2cam_t
-
-# viewpad = np.eye(4)
-# viewpad[: intrinsic.shape[0], : intrinsic.shape[1]] = intrinsic
-# lidar2img_rt = viewpad @ lidar2cam_rt.T
-
-# cur_pc_xyz = np.concatenate([pc_xyz, np.ones_like(pc_xyz)[:, :1]], -1)
-# cur_pc_cam = lidar2img_rt @ cur_pc_xyz.T
-# cur_pc_cam = cur_pc_cam.T
-# cur_pc_in_fov = cur_pc_cam[:, 2] > eps
-# cur_pc_cam = cur_pc_cam[..., 0:2] / np.maximum(cur_pc_cam[..., 2:3], np.ones_like(cur_pc_cam[..., 2:3]) * eps)
-
-# if img_shape is not None:
-# img_h, img_w = img_shape
-# cur_pc_in_fov = (
-# cur_pc_in_fov
-# & (cur_pc_cam[:, 0] < (img_w - 1))
-# & (cur_pc_cam[:, 0] > 0)
-# & (cur_pc_cam[:, 1] < (img_h - 1))
-# & (cur_pc_cam[:, 1] > 0)
-# )
-# return cur_pc_cam, cur_pc_in_fov
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 2b8ecce4..071a5284 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -16,7 +16,7 @@
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
-from d123.geometry.transform.se3 import convert_absolute_to_relative_se3_array
+from d123.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array
# from navsim.common.dataclasses import Annotations, Camera, Lidar
# from navsim.common.enums import BoundingBoxIndex, LidarIndex
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index ecddaa02..6814f600 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -27,7 +27,9 @@
from d123.dataset.maps.map_datatypes import MapLayer
from d123.dataset.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
-from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.geometry_index import StateSE2Index
+from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
+from d123.geometry.vector import Vector2D
def add_default_map_on_ax(
@@ -156,7 +158,10 @@ def add_bounding_box_to_ax(
)
arrow = np.zeros((2, 2), dtype=np.float64)
arrow[0] = center_se2.point_2d.array
- arrow[1] = translate_along_yaw(center_se2, Point2D(bounding_box.length / 2.0 + 0.5, 0.0)).point_2d.array
+ arrow[1] = translate_se2_along_yaw(
+ center_se2,
+ Vector2D(bounding_box.length / 2.0 + 0.5, 0.0),
+ ).array[StateSE2Index.XY]
ax.plot(
arrow[:, 0],
arrow[:, 1],
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index afa5ea2e..f72fa666 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -16,7 +16,7 @@
from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.dataset.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
-from d123.geometry.transform.se3 import convert_relative_to_absolute_points_3d_array
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
@@ -231,11 +231,8 @@ def get_camera_values(
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
# Get the rotation matrix of the rear axle pose
- from d123.geometry.transform.se3 import get_rotation_matrix
- ego_transform = np.eye(4, dtype=np.float64)
- ego_transform[:3, :3] = get_rotation_matrix(rear_axle)
- ego_transform[:3, 3] = rear_axle.point_3d.array
+ ego_transform = rear_axle.transformation_matrix
camera_transform = ego_transform @ camera_to_ego
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 44831f3e..c88faf3e 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -7,7 +7,7 @@
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
-from d123.geometry.transform.se3 import translate_body_frame
+from d123.geometry.transform.transform_se3 import translate_body_frame
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index a6371421..c1d1698f 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -28,7 +28,7 @@
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index 3f658838..e5c61154 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -70,6 +70,10 @@ class Vector3DIndex(IntEnum):
Y = 1
Z = 2
+ @classproperty
+ def XYZ(cls) -> slice:
+ return slice(cls.X, cls.Z + 1)
+
class EulerAnglesIndex(IntEnum):
"""
@@ -117,6 +121,10 @@ def XYZ(cls) -> slice:
def ROTATION_XYZ(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
+ @classproperty
+ def EULER_ANGLES(cls) -> slice:
+ return slice(cls.ROLL, cls.YAW + 1)
+
class QuaternionSE3Index(IntEnum):
"""
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index b2d8db2c..5ab13283 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -256,7 +256,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
:return: A 3x3 numpy array representing the rotation matrix.
"""
- raise NotImplementedError("Rotation matrix conversion not implemented yet.")
+ return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]).rotation_matrix
@property
def transformation_matrix(self) -> npt.NDArray[np.float64]:
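
With rotation_matrix now implemented on StateSE3, it should agree with the free function that the converters import; a hedged consistency check, assuming both use the same intrinsic R_x @ R_y @ R_z convention:

    import numpy as np

    from d123.geometry import StateSE3
    from d123.geometry.transform.transform_se3 import get_rotation_matrix

    pose = StateSE3(0.0, 0.0, 0.0, 0.1, 0.2, 0.3)
    np.testing.assert_allclose(pose.rotation_matrix, get_rotation_matrix(pose), atol=1e-12)
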
diff --git a/d123/geometry/transform/rotation.py b/d123/geometry/transform/rotation.py
deleted file mode 100644
index a99a2fc5..00000000
--- a/d123/geometry/transform/rotation.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# import numpy as np
-# import numpy.typing as npt
-
-# from d123.geometry.base import Point3DIndex, StateSE3, StateSE3Index
-# from d123.geometry.vector import Vector3D
-
-
-# def get_roll_pitch_yaw_from_rotation_matrix(
-# rotation_matrix: npt.NDArray[np.float64],
-# ) -> Vector3D:
-# """Extract roll, pitch, and yaw angles from a rotation matrix."""
-# assert rotation_matrix.shape == (3, 3), "Rotation matrix must be 3x3."
-
-# sy = np.sqrt(rotation_matrix[0, 0] ** 2 + rotation_matrix[1, 0] ** 2)
-
-# singular = sy < 1e-6
-
-# if not singular:
-# x = np.arctan2(rotation_matrix[2, 1], rotation_matrix[2, 2])
-# y = np.arctan2(-rotation_matrix[2, 0], sy)
-# z = np.arctan2(rotation_matrix[1, 0], rotation_matrix[0, 0])
-# else:
-# x = np.arctan2(-rotation_matrix[1, 2], rotation_matrix[1, 1])
-# y = np.arctan2(-rotation_matrix[2, 0], sy)
-# z = 0.0
-
-# return Vector3D(x=x, y=y, z=z)
diff --git a/d123/geometry/transform/se3.py b/d123/geometry/transform/se3.py
deleted file mode 100644
index 02be57df..00000000
--- a/d123/geometry/transform/se3.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry import Point3DIndex, StateSE3, StateSE3Index, Vector3D
-
-
-def get_rotation_matrix(state_se3: StateSE3) -> npt.NDArray[np.float64]:
- # Intrinsic Z-Y'-X'' rotation: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
- R_x = np.array(
- [
- [1, 0, 0],
- [0, np.cos(state_se3.roll), -np.sin(state_se3.roll)],
- [0, np.sin(state_se3.roll), np.cos(state_se3.roll)],
- ],
- dtype=np.float64,
- )
- R_y = np.array(
- [
- [np.cos(state_se3.pitch), 0, np.sin(state_se3.pitch)],
- [0, 1, 0],
- [-np.sin(state_se3.pitch), 0, np.cos(state_se3.pitch)],
- ],
- dtype=np.float64,
- )
- R_z = np.array(
- [
- [np.cos(state_se3.yaw), -np.sin(state_se3.yaw), 0],
- [np.sin(state_se3.yaw), np.cos(state_se3.yaw), 0],
- [0, 0, 1],
- ],
- dtype=np.float64,
- )
- return R_x @ R_y @ R_z
-
-
-def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
-
- R = get_rotation_matrix(state_se3)
- z_axis = R[:, 2]
-
- new_x = state_se3.x + distance * z_axis[0]
- new_y = state_se3.y + distance * z_axis[1]
- new_z = state_se3.z + distance * z_axis[2]
-
- return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw)
-
-
-def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
-
- R = get_rotation_matrix(state_se3)
- y_axis = R[:, 1]
-
- new_x = state_se3.x + distance * y_axis[0]
- new_y = state_se3.y + distance * y_axis[1]
- new_z = state_se3.z + distance * y_axis[2]
-
- return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw)
-
-
-def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
-
- R = get_rotation_matrix(state_se3)
- x_axis = R[:, 0]
-
- new_x = state_se3.x + distance * x_axis[0]
- new_y = state_se3.y + distance * x_axis[1]
- new_z = state_se3.z + distance * x_axis[2]
-
- return StateSE3(new_x, new_y, new_z, state_se3.roll, state_se3.pitch, state_se3.yaw)
-
-
-def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
- R = get_rotation_matrix(state_se3)
-
- body_translation = vector_3d.array
-
- # Transform to world frame
- world_translation = R @ body_translation
-
- return StateSE3(
- state_se3.x + world_translation[0],
- state_se3.y + world_translation[1],
- state_se3.z + world_translation[2],
- state_se3.roll,
- state_se3.pitch,
- state_se3.yaw,
- )
-
-
-def convert_relative_to_absolute_points_3d_array(
- origin: StateSE3, points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
-
- # TODO: implement function for origin as np.ndarray
-
- R = get_rotation_matrix(origin)
- absolute_points = points_3d_array @ R.T + origin.point_3d.array
- return absolute_points
-
-
-def convert_absolute_to_relative_se3_array(
- origin: StateSE3, se3_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- assert se3_array.shape[-1] == len(StateSE3Index)
- # TODO: remove transform for-loop, use vectorized operations
-
- # Extract rotation and translation of origin
- R_origin = get_rotation_matrix(origin)
- t_origin = origin.point_3d.array
-
- # Prepare output array
- rel_se3_array = np.empty_like(se3_array)
-
- # For each SE3 in the array
- for i in range(se3_array.shape[0]):
- abs_se3 = se3_array[i]
- abs_pos = abs_se3[StateSE3Index.XYZ]
- abs_rpy = abs_se3[StateSE3Index.ROLL : StateSE3Index.YAW + 1]
-
- # Relative position: rotate and translate
- rel_pos = R_origin.T @ (abs_pos - t_origin)
-
- # Relative orientation: subtract origin's rpy
- rel_rpy = abs_rpy - np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64)
-
- rel_se3_array[i, StateSE3Index.X : StateSE3Index.Z + 1] = rel_pos
- rel_se3_array[i, StateSE3Index.ROLL : StateSE3Index.YAW + 1] = rel_rpy
-
- return rel_se3_array
-
-
-def convert_relative_to_absolute_se3_array(
- origin: StateSE3, se3_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- assert se3_array.shape[-1] == len(StateSE3Index)
- # TODO: remove transform for-loop, use vectorized operations
-
- # Extract rotation and translation of origin
- R_origin = get_rotation_matrix(origin)
- t_origin = origin.point_3d.array
-
- # Prepare output array
- abs_se3_array = np.empty_like(se3_array)
-
- # For each SE3 in the array
- for i in range(se3_array.shape[0]):
- rel_se3 = se3_array[i]
- rel_pos = rel_se3[StateSE3Index.XYZ]
- rel_rpy = rel_se3[StateSE3Index.ROLL : StateSE3Index.YAW + 1]
-
- # Absolute position: rotate and translate
- abs_pos = R_origin @ rel_pos + t_origin
-
- # Absolute orientation: add origin's rpy
- abs_rpy = rel_rpy + np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64)
-
- abs_se3_array[i, StateSE3Index.X : StateSE3Index.Z + 1] = abs_pos
- abs_se3_array[i, StateSE3Index.ROLL : StateSE3Index.YAW + 1] = abs_rpy
-
- return abs_se3_array
-
-
-def translate_points_3d_along_z(
- state_se3: StateSE3,
- points_3d: npt.NDArray[np.float64],
- distance: float,
-) -> npt.NDArray[np.float64]:
- assert points_3d.shape[-1] == len(Point3DIndex)
-
- R = get_rotation_matrix(state_se3)
- z_axis = R[:, 2]
-
- translated_points = np.zeros_like(points_3d)
-
- translated_points[..., Point3DIndex.X] = points_3d[..., Point3DIndex.X] + distance * z_axis[0]
- translated_points[..., Point3DIndex.Y] = points_3d[..., Point3DIndex.Y] + distance * z_axis[1]
- translated_points[..., Point3DIndex.Z] = points_3d[..., Point3DIndex.Z] + distance * z_axis[2]
-
- return translated_points
diff --git a/d123/geometry/transform/tranform_2d.py b/d123/geometry/transform/tranform_2d.py
deleted file mode 100644
index b85e598b..00000000
--- a/d123/geometry/transform/tranform_2d.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry.se import StateSE2
-from d123.geometry.vector import Vector2D
-
-# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
-
-
-def translate(pose: StateSE2, translation: Vector2D) -> StateSE2:
- return StateSE2(pose.x + translation.x, pose.y + translation.y, pose.yaw)
-
-
-def translate_along_yaw(pose: StateSE2, translation: Vector2D) -> StateSE2:
- half_pi = np.pi / 2.0
- translation: npt.NDArray[np.float64] = np.array(
- [
- (translation.y * np.cos(pose.yaw + half_pi)) + (translation.x * np.cos(pose.yaw)),
- (translation.y * np.sin(pose.yaw + half_pi)) + (translation.x * np.sin(pose.yaw)),
- ]
- )
- return translate(pose, Vector2D.from_array(translation))
diff --git a/d123/geometry/transform/se2_array.py b/d123/geometry/transform/transform_se2.py
similarity index 51%
rename from d123/geometry/transform/se2_array.py
rename to d123/geometry/transform/transform_se2.py
index 97ff8bee..c1f33cd4 100644
--- a/d123/geometry/transform/se2_array.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -3,8 +3,10 @@
import numpy as np
import numpy.typing as npt
+from d123.geometry.geometry_index import Vector2DIndex
from d123.geometry.se import StateSE2, StateSE2Index
from d123.geometry.utils.rotation_utils import normalize_angle
+from d123.geometry.vector import Vector2D
# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
@@ -12,11 +14,13 @@
def convert_absolute_to_relative_se2_array(
origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
- """
- Converts an StateSE2 array from global to relative coordinates.
+ """Converts an StateSE2 array from global to relative coordinates.
+
:param origin: origin pose of relative coords system
- :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
- :return: SE2 coords array in relative coordinates
+ :param state_se2_array: array of SE2 states with (x,y,yaw), indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
+    :return: SE2 array in relative coordinates, indexed by \
+        :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
"""
if isinstance(origin, StateSE2):
origin_array = origin.array
@@ -28,11 +32,11 @@ def convert_absolute_to_relative_se2_array(
rotate_rad = -origin_array[StateSE2Index.YAW]
cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
- R = np.array([[cos, -sin], [sin, cos]])
+ R_inv = np.array([[cos, -sin], [sin, cos]])
state_se2_rel = state_se2_array - origin_array
- state_se2_rel[..., :2] = state_se2_rel[..., :2] @ R.T
- state_se2_rel[..., 2] = normalize_angle(state_se2_rel[..., 2])
+ state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R_inv.T
+ state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW])
return state_se2_rel
@@ -40,8 +44,8 @@ def convert_absolute_to_relative_se2_array(
def convert_absolute_to_relative_point_2d_array(
origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
- """
- Converts an absolute 2D point array from global to relative coordinates.
+ """Converts an absolute 2D point array from global to relative coordinates.
+
:param origin: origin pose of relative coords system
:param point_2d_array: array of 2D points with (x,y) in last dim
:return: 2D points array in relative coordinates
@@ -86,8 +90,8 @@ def convert_relative_to_absolute_se2_array(
R = np.array([[cos, -sin], [sin, cos]])
state_se2_rel = state_se2_array + origin_array
- state_se2_rel[..., :2] = state_se2_rel[..., :2] @ R.T
- state_se2_rel[..., 2] = normalize_angle(state_se2_rel[..., 2])
+ state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R.T
+ state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW])
return state_se2_rel
@@ -112,3 +116,68 @@ def convert_relative_to_absolute_point_2d_array(
point_2d_abs = point_2d_abs + origin_array[..., StateSE2Index.XY]
return point_2d_abs
+
+
+def translate_se2(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
+ """Translate a single SE2 state by a 2D vector.
+
+ :param state_se2: SE2 state to translate
+ :param translation: 2D translation vector
+ :return: translated SE2 state
+ """
+ translated_xy = state_se2.array[StateSE2Index.XY] + translation.array[Vector2DIndex.XY]
+ return StateSE2(translated_xy[0], translated_xy[1], state_se2.array[StateSE2Index.YAW])
+
+
+def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: Vector2D) -> npt.NDArray[np.float64]:
+ """Translate an array of SE2 states by a 2D vector.
+
+ :param state_se2_array: array of SE2 states, indexed by \
+ :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
+ :param translation: 2D translation vector
+ :return: translated SE2 array
+ """
+ result = state_se2_array.copy()
+ result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY]
+ return result
+
+
+def translate_se2_along_yaw(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
+ """Translate a single SE2 state along its local coordinate frame.
+
+ :param state_se2: SE2 state to translate
+ :param translation: 2D translation in local frame (x: forward, y: left)
+ :return: translated SE2 state
+ """
+ yaw = state_se2.array[StateSE2Index.YAW]
+ cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
+
+ # Transform translation from local to global frame
+ global_translation = np.array(
+ [translation.x * cos_yaw - translation.y * sin_yaw, translation.x * sin_yaw + translation.y * cos_yaw]
+ )
+
+ return translate_se2(state_se2, Vector2D.from_array(global_translation))
+
+
+def translate_se2_array_along_yaw(
+ state_se2_array: npt.NDArray[np.float64], translation: Vector2D
+) -> npt.NDArray[np.float64]:
+ """Translate an array of SE2 states along their respective local coordinate frames.
+
+ :param state_se2_array: array of SE2 states with (x,y,yaw) in last dim
+ :param translation: 2D translation in local frame (x: forward, y: left)
+ :return: translated SE2 array
+ """
+ result = state_se2_array.copy()
+ yaws = state_se2_array[..., StateSE2Index.YAW]
+ cos_yaws, sin_yaws = np.cos(yaws), np.sin(yaws)
+
+ # Transform translation from local to global frame for each state
+ global_translation_x = translation.x * cos_yaws - translation.y * sin_yaws
+ global_translation_y = translation.x * sin_yaws + translation.y * cos_yaws
+
+ result[..., StateSE2Index.X] += global_translation_x
+ result[..., StateSE2Index.Y] += global_translation_y
+
+ return result
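The along-yaw helpers above are just a 2D rotation of the local offset into the global frame. A self-contained sketch of the same math (`translate_along_yaw_xy` is a hypothetical standalone name, not part of the repo):

```python
import numpy as np

def translate_along_yaw_xy(x: float, y: float, yaw: float, dx: float, dy: float):
    """Move (dx, dy) in the pose's local frame (x: forward, y: left)."""
    cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
    gx = dx * cos_yaw - dy * sin_yaw  # rotate the local offset into the global frame
    gy = dx * sin_yaw + dy * cos_yaw
    return x + gx, y + gy

# Facing +Y (yaw = pi/2): 1 m "forward" increases y, 1 m "left" decreases x.
assert np.allclose(translate_along_yaw_xy(0.0, 0.0, np.pi / 2, 1.0, 0.0), (0.0, 1.0))
assert np.allclose(translate_along_yaw_xy(0.0, 0.0, np.pi / 2, 0.0, 1.0), (-1.0, 0.0))
```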
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
new file mode 100644
index 00000000..67edd050
--- /dev/null
+++ b/d123/geometry/transform/transform_se3.py
@@ -0,0 +1,177 @@
+from typing import Union
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry import StateSE3, StateSE3Index, Vector3D
+from d123.geometry.geometry_index import Vector3DIndex
+from d123.geometry.rotation import EulerAngles
+from d123.geometry.utils.rotation_utils import (
+ get_rotation_matrices_from_euler_array,
+ get_rotation_matrix_from_euler_array,
+ normalize_angle,
+)
+
+
+def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
+
+ R = state_se3.rotation_matrix
+ z_axis = R[:, 2]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.X] += distance * z_axis[0]
+ state_se3_array[StateSE3Index.Y] += distance * z_axis[1]
+ state_se3_array[StateSE3Index.Z] += distance * z_axis[2]
+ return StateSE3.from_array(state_se3_array)
+
+
+def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
+
+ R = state_se3.rotation_matrix
+ y_axis = R[:, 1]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.X] += distance * y_axis[0]
+ state_se3_array[StateSE3Index.Y] += distance * y_axis[1]
+ state_se3_array[StateSE3Index.Z] += distance * y_axis[2]
+ return StateSE3.from_array(state_se3_array)
+
+
+def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
+
+ R = state_se3.rotation_matrix
+ x_axis = R[:, 0]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.X] += distance * x_axis[0]
+ state_se3_array[StateSE3Index.Y] += distance * x_axis[1]
+ state_se3_array[StateSE3Index.Z] += distance * x_axis[2]
+
+ return StateSE3.from_array(state_se3_array)
+
+
+def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
+ R = state_se3.rotation_matrix
+
+ # Transform to world frame
+ world_translation = R @ vector_3d.array
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ]
+
+ return StateSE3.from_array(state_se3_array)
+
+
+def convert_relative_to_absolute_points_3d_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+
+ # TODO: implement function for origin as np.ndarray
+ if isinstance(origin, StateSE3):
+ origin_array = origin.array
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ origin_array = origin
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix
+    absolute_points = points_3d_array @ R.T + origin_array[StateSE3Index.XYZ]
+ return absolute_points
+
+
+def convert_absolute_to_relative_se3_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ if isinstance(origin, StateSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ origin_array = origin
+ t_origin = origin_array[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.shape[-1] == len(StateSE3Index)
+
+ # Extract positions and orientations from se3_array
+ abs_positions = se3_array[..., StateSE3Index.XYZ]
+ abs_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+
+ # Vectorized relative position calculation
+ rel_positions = (abs_positions - t_origin) @ R_origin
+
+ # Get rotation matrices for all absolute orientations
+ R_abs = get_rotation_matrices_from_euler_array(abs_euler_angles)
+
+    # The exact relative rotation would be R_rel = np.transpose(R_origin) @ R_abs,
+    # followed by a matrix-to-Euler conversion, which is not implemented yet
+    # (R_abs above is therefore currently unused).
+    # For now, approximate the relative orientation by subtracting the origin's
+    # Euler angles; this is only exact when the rotations commute (e.g. yaw-only).
+ origin_euler = origin_array[StateSE3Index.EULER_ANGLES]
+ rel_euler_angles = abs_euler_angles - origin_euler
+
+ # Prepare output array
+ rel_se3_array = se3_array.copy()
+ rel_se3_array[..., StateSE3Index.XYZ] = rel_positions
+ rel_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+ return rel_se3_array
+
+
+def convert_relative_to_absolute_se3_array(
+ origin: StateSE3, se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ if isinstance(origin, StateSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ origin_array = origin
+ t_origin = origin_array[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+ assert se3_array.shape[-1] == len(StateSE3Index)
+
+ # Extract relative positions and orientations
+ rel_positions = se3_array[..., StateSE3Index.XYZ]
+ rel_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+
+ # Vectorized absolute position calculation: rotate and translate
+ abs_positions = (R_origin @ rel_positions.T).T + t_origin
+
+ # Vectorized absolute orientation: add origin's euler angles
+ origin_euler = np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64)
+ abs_euler_angles = rel_euler_angles + origin_euler
+
+ # Prepare output array
+ abs_se3_array = se3_array.copy()
+ abs_se3_array[..., StateSE3Index.XYZ] = abs_positions
+ abs_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+
+ return abs_se3_array
+
+
+def convert_absolute_to_relative_points_3d_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+
+ if isinstance(origin, StateSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ t_origin = origin[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin[StateSE3Index.EULER_ANGLES])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ # Translate points to origin frame, then rotate to body frame
+ relative_points = (points_3d_array - t_origin) @ R_origin
+ return relative_points
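The position math in this module is easiest to sanity-check in plain NumPy: relative coordinates are p_rel = R^T (p_abs - t), written row-wise as (p_abs - t) @ R, exactly as in the functions above. A minimal sketch with a yaw-only origin (all values made up for illustration):

```python
import numpy as np

# Origin pose: translation t and rotation R (here a pure 90-degree yaw).
t = np.array([1.0, 2.0, 0.5])
c, s = np.cos(np.pi / 2), np.sin(np.pi / 2)
R = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

points_abs = np.array([[2.0, 2.0, 0.5], [1.0, 3.0, 1.5]])

# absolute -> relative: rotate the offset into the body frame, row-wise (p - t) @ R.
points_rel = (points_abs - t) @ R
# relative -> absolute: p_abs = R p_rel + t, row-wise p_rel @ R.T + t.
assert np.allclose(points_rel @ R.T + t, points_abs)
```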
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 3af49624..9d2e2140 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -11,7 +11,7 @@
from d123.dataset.scene.abstract_scene import AbstractScene
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.se import StateSE2
-from d123.geometry.transform.se2_array import convert_relative_to_absolute_point_2d_array
+from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array
from d123.geometry.utils.rotation_utils import normalize_angle
from d123.simulation.agents.abstract_agents import AbstractAgents
from d123.training.feature_builder.smart_feature_builder import SMARTFeatureBuilder
diff --git a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
index ae8c1136..52da337c 100644
--- a/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
+++ b/d123/simulation/gym/environment/gym_observation/raster/raster_renderer.py
@@ -13,8 +13,8 @@
from d123.common.datatypes.detection.detection import BoxDetectionSE2, TrafficLightStatus
from d123.geometry.se import StateSE2
-from d123.geometry.transform.se2_array import convert_absolute_to_relative_point_2d_array
from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_point_2d_array
from d123.geometry.vector import Vector2D
from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea, RectangleEnvironmentArea
from d123.simulation.gym.environment.helper.environment_cache import BoxDetectionCache, MapCache
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 4286b990..9ea931bf 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -18,7 +18,7 @@
from d123.dataset.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2
from d123.geometry.geometry_index import StateSE2Index
-from d123.geometry.transform.se2_array import convert_absolute_to_relative_se2_array
+from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array
# TODO: Hide feature builder behind abstraction.
From ca194846608358df6de3e09d4bf5110527392c68 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 19:42:00 +0200
Subject: [PATCH 019/145] Fix wrong transformation matrix in SE3.
---
d123/common/visualization/viser/utils.py | 20 --------------------
d123/geometry/se.py | 2 +-
2 files changed, 1 insertion(+), 21 deletions(-)
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index f72fa666..ad49b88a 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -243,26 +243,6 @@ def get_camera_values(
return camera_position, camera_rotation, camera
-def _get_ego_frame_pose(scene: AbstractScene, iteration: int) -> StateSE3:
-
- initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d
- state_se3 = scene.get_ego_state_at_iteration(iteration).center_se3
-
- state_se3.x = state_se3.x - initial_point_3d.x
- state_se3.y = state_se3.y - initial_point_3d.y
- state_se3.z = state_se3.z - initial_point_3d.z
-
- return state_se3
-
-
-def euler_to_quaternion_scipy(roll: float, pitch: float, yaw: float) -> npt.NDArray[np.float64]:
- from scipy.spatial.transform import Rotation
-
- r = Rotation.from_euler("xyz", [roll, pitch, yaw], degrees=False)
- quat = r.as_quat(scalar_first=True)
- return quat
-
-
def get_lidar_points(
scene: AbstractScene, iteration: int, lidar_types: List[LiDARType]
) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]:
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 5ab13283..378128ca 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -267,7 +267,7 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]:
rotation_matrix = self.rotation_matrix
transformation_matrix = np.eye(4, dtype=np.float64)
transformation_matrix[:3, :3] = rotation_matrix
- transformation_matrix[3, :3] = self.array[StateSE3Index.XYZ]
+ transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]
return transformation_matrix
@property
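The one-character change above is easy to gloss over: with the column-vector convention (p' = T @ p) that this fix implies, the translation must occupy the last column of the 4x4 matrix, not the bottom row. A standalone check of both variants:

```python
import numpy as np

R = np.eye(3)                 # identity rotation, for clarity
xyz = np.array([1.0, 2.0, 3.0])

T = np.eye(4)
T[:3, :3] = R
T[:3, 3] = xyz                # translation in the last COLUMN (fixed version)

p = np.array([0.0, 0.0, 0.0, 1.0])  # homogeneous point at the origin
assert np.allclose(T @ p, [1.0, 2.0, 3.0, 1.0])

# The buggy variant put xyz into the bottom ROW, so T @ p never translated:
T_bad = np.eye(4)
T_bad[3, :3] = xyz
assert np.allclose(T_bad @ p, [0.0, 0.0, 0.0, 1.0])
```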
From 89565afcdb8f690d5790945fc1de6438d229908e Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 20:48:37 +0200
Subject: [PATCH 020/145] Fix some stuff in the WOPD data converter. NOTE: It's
significantly faster to flatten lists in arrow.
---
d123/common/visualization/viser/server.py | 2 +-
d123/dataset/arrow/conversion.py | 6 ++-
.../waymo_map_utils/womp_boundary_utils.py | 8 ++--
.../wopd/wopd_data_converter.py | 37 ++++++++-----------
.../default_dataset_conversion.yaml | 4 +-
5 files changed, 27 insertions(+), 30 deletions(-)
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 087a7e8b..686d3746 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -49,7 +49,7 @@
CAMERA_SCALE: float = 1.0
# Lidar config:
-LIDAR_AVAILABLE: bool = False
+LIDAR_AVAILABLE: bool = True
LIDAR_TYPES: List[LiDARType] = [
LiDARType.LIDAR_MERGED,
diff --git a/d123/dataset/arrow/conversion.py b/d123/dataset/arrow/conversion.py
index ab078545..c961ab7d 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/dataset/arrow/conversion.py
@@ -157,10 +157,12 @@ def get_lidar_from_arrow_table(
raise NotImplementedError(f"Loading LiDAR data for dataset {log_metadata.dataset} is not implemented.")
else:
+ # FIXME: This is a temporary fix for WOPD dataset. The lidar data is stored as a flattened array of float32.
+ # Ideally the lidar index should handle the dimension. But for now we hardcode it here.
+ lidar_data = np.array(lidar_data, dtype=np.float32).reshape(-1, 3)
+ lidar_data = np.concatenate([np.zeros_like(lidar_data), lidar_data], axis=-1)
if log_metadata.dataset == "wopd":
- lidar_data = np.array(lidar_data, dtype=np.float64)
lidar = LiDAR(metadata=lidar_metadata, point_cloud=lidar_data.T)
else:
raise NotImplementedError("Only string file paths for lidar data are supported.")
-
return lidar
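The temporary fix above reads the flattened float32 payload back into an (N, 3) Cartesian array and zero-pads the (absent) polar features before transposing. A sketch of that reshape/padding round-trip, with a made-up 3-point payload:

```python
import numpy as np

# Hypothetical flattened payload as read back from arrow: N * 3 float32 values.
flat = np.arange(9, dtype=np.float32)
xyz = flat.reshape(-1, 3)                                    # (N, 3) Cartesian points
padded = np.concatenate([np.zeros_like(xyz), xyz], axis=-1)  # (N, 6), polar slots zeroed
assert padded.shape == (3, 6)
# The converter then stores the transpose, i.e. one row per lidar feature channel:
assert padded.T.shape == (6, 3)
```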
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index 269b09b6..99e63192 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -6,7 +6,7 @@
import shapely.geometry as geom
from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D
-from d123.geometry.transform.tranform_2d import translate_along_yaw
+from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
from d123.geometry.utils.rotation_utils import normalize_angle
MAX_LANE_WIDTH = 25.0 # meters
@@ -64,7 +64,7 @@ def _collect_perpendicular_hits(
assert sign in [1.0, -1.0], "Sign must be either 1.0 (left) or -1.0 (right)"
# perp_start_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * PERP_START_OFFSET))
perp_start_point = lane_query_se2
- perp_end_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0))
+ perp_end_point = translate_se2_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0))
perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]])
lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]]
@@ -261,7 +261,9 @@ def _get_default_boundary_point_3d(
lane_query_se2: StateSE2, lane_query_3d: Point3D, sign: float
) -> Point3D:
perp_boundary_distance = DEFAULT_LANE_WIDTH / 2.0
- boundary_point_se2 = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance))
+ boundary_point_se2 = translate_se2_along_yaw(
+ lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance)
+ )
return Point3D(boundary_point_se2.x, boundary_point_se2.y, lane_query_3d.z)
if no_boundary_ratio > 0.8:
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index c1d1698f..812d40c6 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -14,7 +14,6 @@
from pyquaternion import Quaternion
from waymo_open_dataset import dataset_pb2
-from d123.common.datatypes.detection.detection import TrafficLightStatus
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
@@ -28,20 +27,18 @@
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+# TODO: Make keep_polar_features an optional argument.
+# With polar features, the lidar loading time is SIGNIFICANTLY higher.
+
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
TARGET_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = False
-NUPLAN_TRAFFIC_STATUS_DICT: Final[Dict[str, TrafficLightStatus]] = {
- "green": TrafficLightStatus.GREEN,
- "red": TrafficLightStatus.RED,
- "unknown": TrafficLightStatus.UNKNOWN,
-}
# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/label.proto#L63
WOPD_DETECTION_NAME_DICT: Dict[int, DetectionType] = {
@@ -248,9 +245,7 @@ def convert_wopd_tfrecord_log_to_arrow(
if data_converter_config.lidar_store_option == "path":
raise NotImplementedError("Filepath lidar storage is not implemented.")
elif data_converter_config.lidar_store_option == "binary":
- schema_column_list.append(
- (lidar_type.serialize(), pa.list_(pa.list_(pa.float32(), len(WopdLidarIndex))))
- )
+ schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32()))))
recording_schema = pa.schema(schema_column_list)
recording_schema = recording_schema.with_metadata(
@@ -491,16 +486,14 @@ def _extract_camera(
transform = np.array(calibration.extrinsic.transform).reshape(4, 4)
# FIXME: This is an ugly hack to convert to uniform camera convention.
- flip_camera = get_rotation_matrix(
- StateSE3(
- x=0.0,
- y=0.0,
- z=0.0,
- roll=np.deg2rad(0.0),
- pitch=np.deg2rad(90.0),
- yaw=np.deg2rad(-90.0),
- )
- )
+ flip_camera = StateSE3(
+ x=0.0,
+ y=0.0,
+ z=0.0,
+ roll=np.deg2rad(0.0),
+ pitch=np.deg2rad(90.0),
+ yaw=np.deg2rad(-90.0),
+ ).rotation_matrix
transform[:3, :3] = transform[:3, :3] @ flip_camera
context_extrinsic[camera_type] = transform
@@ -533,12 +526,12 @@ def _extract_lidar(
range_images=range_images,
camera_projections=camera_projections,
range_image_top_pose=range_image_top_pose,
- keep_polar_features=True,
+ keep_polar_features=False,
)
lidar_data: Dict[LiDARType, npt.NDArray[np.float32]] = {}
for lidar_idx, frame_lidar in enumerate(frame.lasers):
lidar_type = WOPD_LIDAR_TYPES[frame_lidar.name]
- lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32)
+ lidar_data[lidar_type] = np.array(points[lidar_idx], dtype=np.float32).flatten()
return lidar_data
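The schema change above pairs with this flattening: per the commit note, building one flat `pa.list_(pa.float32())` value per row is significantly faster than nested per-point lists. A small sketch of the two layouts and the read-back reshape (sample values are made up):

```python
import numpy as np
import pyarrow as pa

points = np.arange(12, dtype=np.float32).reshape(4, 3)  # hypothetical (N, 3) cloud

# Old layout: one fixed-size inner list per point; new layout: one flat list per row.
nested = pa.array([points.tolist()], type=pa.list_(pa.list_(pa.float32(), 3)))
flat = pa.array([points.flatten().tolist()], type=pa.list_(pa.float32()))

# Both hold the same values; the flat list is cheaper to build and to read back.
restored = np.asarray(flat[0].as_py(), dtype=np.float32).reshape(-1, 3)
assert np.allclose(restored, points)
assert np.allclose(np.asarray(nested[0].as_py(), dtype=np.float32), points)
```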
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 0a4544da..b46e7c9d 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -17,8 +17,8 @@ defaults:
- datasets:
# - nuplan_private_dataset
# - carla_dataset
- # - wopd_dataset
- - av2_sensor_dataset
+ - wopd_dataset
+ # - av2_sensor_dataset
force_log_conversion: True
force_map_conversion: True
From 9b2899578ac3f49753b413fbb934aed9d922a438 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 20:59:20 +0200
Subject: [PATCH 021/145] Update `contains_vectorized` from OccupancyMap2D to
avoid deprecated function.
---
d123/geometry/occupancy_map.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index c9886a14..b28348e7 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -152,6 +152,6 @@ def contains_vectorized(self, points: npt.NDArray[np.float64]) -> npt.NDArray[np
"""
output = np.zeros((len(self._geometries), len(points)), dtype=bool)
for i, geometry in enumerate(self._geometries):
- output[i] = shapely.vectorized.contains(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
+ output[i] = shapely.contains_xy(geometry, points[..., Point2DIndex.X], points[..., Point2DIndex.Y])
return output
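`shapely.contains_xy` is the Shapely >= 2.0 replacement for the removed `shapely.vectorized.contains`: it takes coordinate arrays directly and returns a boolean mask. A minimal usage sketch:

```python
import numpy as np
import shapely
from shapely.geometry import box

square = box(0.0, 0.0, 2.0, 2.0)
xs = np.array([1.0, 3.0])
ys = np.array([1.0, 1.0])

# Vectorized point-in-polygon test without the deprecated shapely.vectorized module.
mask = shapely.contains_xy(square, xs, ys)
assert mask.tolist() == [True, False]
```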
From 5c1365c1bdbcb2a49ae603e259155873b532ecf8 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 21:23:21 +0200
Subject: [PATCH 022/145] Add docstrings and (faulty) tests for transform
functions (#44)
---
d123/geometry/test/test_transform.py | 239 +++++++++++++++++++++++
d123/geometry/transform/transform_se3.py | 98 ++++++++--
2 files changed, 318 insertions(+), 19 deletions(-)
create mode 100644 d123/geometry/test/test_transform.py
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
new file mode 100644
index 00000000..7377bc41
--- /dev/null
+++ b/d123/geometry/test/test_transform.py
@@ -0,0 +1,239 @@
+# import unittest
+# import numpy as np
+# import numpy.typing as npt
+
+# from d123.geometry.se import StateSE2, StateSE3
+# from d123.geometry.transform.transform_se2 import (
+# convert_absolute_to_relative_se2_array,
+# convert_absolute_to_relative_point_2d_array,
+# convert_relative_to_absolute_se2_array,
+# convert_relative_to_absolute_point_2d_array,
+# translate_se2,
+# translate_se2_array,
+# translate_se2_along_yaw,
+# translate_se2_array_along_yaw,
+# )
+# from d123.geometry.transform.transform_se3 import (
+# translate_se3_along_z,
+# translate_se3_along_y,
+# translate_se3_along_x,
+# translate_body_frame,
+# convert_absolute_to_relative_se3_array,
+# convert_relative_to_absolute_se3_array,
+# convert_absolute_to_relative_points_3d_array,
+# convert_relative_to_absolute_points_3d_array,
+# )
+# from d123.geometry.vector import Vector2D
+
+
+# class TestTransformSE2(unittest.TestCase):
+# def test_translate_se2(self) -> None:
+# pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=float))
+# translation: Vector2D = Vector2D(1.0, 1.0)
+# result: StateSE2 = translate_se2(pose, translation)
+# expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=float))
+# np.testing.assert_array_almost_equal(result.array, expected.array)
+
+# def test_translate_se2_array(self) -> None:
+# poses: npt.NDArray[np.float64] = np.array(
+# [[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float
+# )
+# translation: Vector2D = Vector2D(1.0, 1.0)
+# result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+# expected: npt.NDArray[np.float64] = np.array(
+# [[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_se2_along_yaw(self) -> None:
+# # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+# pose: npt.NDArray[np.float64] = np.array([0.0, 0.0, np.pi / 2], dtype=float)
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se2_along_yaw(pose, distance)
+# expected: npt.NDArray[np.float64] = np.array([0.0, 1.0, np.pi / 2], dtype=float)
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_se2_array_along_yaw(self) -> None:
+# poses: npt.NDArray[np.float64] = np.array(
+# [[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float
+# )
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance)
+# expected: npt.NDArray[np.float64] = np.array(
+# [[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_absolute_to_relative_se2_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
+# absolute_poses: npt.NDArray[np.float64] = np.array(
+# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(
+# reference, absolute_poses
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_relative_to_absolute_se2_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
+# relative_poses: npt.NDArray[np.float64] = np.array(
+# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(
+# reference, relative_poses
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_absolute_to_relative_point_2d_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
+# absolute_points: npt.NDArray[np.float64] = np.array(
+# [[2.0, 2.0], [0.0, 1.0]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(
+# reference, absolute_points
+# )
+# expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=float)
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_relative_to_absolute_point_2d_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
+# relative_points: npt.NDArray[np.float64] = np.array(
+# [[1.0, 1.0], [-1.0, 0.0]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(
+# reference, relative_points
+# )
+# expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=float)
+# np.testing.assert_array_almost_equal(result, expected)
+
+
+# class TestTransformSE3(unittest.TestCase):
+# def test_translate_se3_along_x(self) -> None:
+# pose: npt.NDArray[np.float64] = np.array(
+# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se3_along_x(pose, distance)
+# expected: npt.NDArray[np.float64] = np.array(
+# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_se3_along_y(self) -> None:
+# pose: npt.NDArray[np.float64] = np.array(
+# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se3_along_y(pose, distance)
+# expected: npt.NDArray[np.float64] = np.array(
+# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_se3_along_z(self) -> None:
+# pose: npt.NDArray[np.float64] = np.array(
+# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# distance: float = 1.0
+# result: npt.NDArray[np.float64] = translate_se3_along_z(pose, distance)
+# expected: npt.NDArray[np.float64] = np.array(
+# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_translate_body_frame(self) -> None:
+# pose: npt.NDArray[np.float64] = np.array(
+# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# translation: npt.NDArray[np.float64] = np.array([1.0, 0.0, 0.0], dtype=float)
+# result: npt.NDArray[np.float64] = translate_body_frame(pose, translation)
+# expected: npt.NDArray[np.float64] = np.array(
+# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_absolute_to_relative_se3_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array(
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# absolute_poses: npt.NDArray[np.float64] = np.array(
+# [
+# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0],
+# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
+# ],
+# dtype=float,
+# )
+# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(
+# reference, absolute_poses
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
+# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],
+# ],
+# dtype=float,
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_relative_to_absolute_se3_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array(
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# relative_poses: npt.NDArray[np.float64] = np.array(
+# [
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
+# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],
+# ],
+# dtype=float,
+# )
+# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(
+# reference, relative_poses
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [
+# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0],
+# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
+# ],
+# dtype=float,
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_absolute_to_relative_points_3d_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array(
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# absolute_points: npt.NDArray[np.float64] = np.array(
+# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(
+# reference, absolute_points
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+# def test_convert_relative_to_absolute_points_3d_array(self) -> None:
+# reference: npt.NDArray[np.float64] = np.array(
+# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
+# )
+# relative_points: npt.NDArray[np.float64] = np.array(
+# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float
+# )
+# result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(
+# reference, relative_points
+# )
+# expected: npt.NDArray[np.float64] = np.array(
+# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float
+# )
+# np.testing.assert_array_almost_equal(result, expected)
+
+
+# if __name__ == "__main__":
+# unittest.main()
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index 67edd050..3a04b5b2 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -4,7 +4,7 @@
import numpy.typing as npt
from d123.geometry import StateSE3, StateSE3Index, Vector3D
-from d123.geometry.geometry_index import Vector3DIndex
+from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
from d123.geometry.rotation import EulerAngles
from d123.geometry.utils.rotation_utils import (
get_rotation_matrices_from_euler_array,
@@ -14,6 +14,12 @@
def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
+ """Translates a SE3 state along the Z-axis.
+
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the Z-axis.
+ :return: The translated SE3 state.
+ """
R = state_se3.rotation_matrix
z_axis = R[:, 2]
@@ -26,6 +32,12 @@ def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
+ """Translates a SE3 state along the Y-axis.
+
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the Y-axis.
+ :return: The translated SE3 state.
+ """
R = state_se3.rotation_matrix
y_axis = R[:, 1]
@@ -38,6 +50,12 @@ def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
+ """Translates a SE3 state along the X-axis.
+
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the X-axis.
+ :return: The translated SE3 state.
+ """
R = state_se3.rotation_matrix
x_axis = R[:, 0]
@@ -51,6 +69,13 @@ def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
+ """Translates a SE3 state along a vector in the body frame.
+
+ :param state_se3: The SE3 state to translate.
+ :param vector_3d: The vector to translate along in the body frame.
+ :return: The translated SE3 state.
+ """
+
R = state_se3.rotation_matrix
# Transform to world frame
@@ -62,27 +87,16 @@ def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
return StateSE3.from_array(state_se3_array)
-def convert_relative_to_absolute_points_3d_array(
- origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
-
- # TODO: implement function for origin as np.ndarray
- if isinstance(origin, StateSE3):
- origin_array = origin.array
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
- origin_array = origin
- else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
-
- R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix
-    absolute_points = points_3d_array @ R.T + origin_array[StateSE3Index.XYZ]
- return absolute_points
-
-
def convert_absolute_to_relative_se3_array(
origin: Union[StateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
+ """Converts an SE3 array from the absolute frame to the relative frame.
+
+ :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+ :param se3_array: The SE3 array in the absolute frame.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+ :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.StateSE3Index`.
+ """
if isinstance(origin, StateSE3):
origin_array = origin.array
t_origin = origin.point_3d.array
@@ -95,6 +109,7 @@ def convert_absolute_to_relative_se3_array(
else:
raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+ assert se3_array.ndim >= 1
assert se3_array.shape[-1] == len(StateSE3Index)
# Extract positions and orientations from se3_array
@@ -126,6 +141,14 @@ def convert_absolute_to_relative_se3_array(
def convert_relative_to_absolute_se3_array(
origin: StateSE3, se3_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
+ """Converts an SE3 array from the relative frame to the absolute frame.
+
+    :param origin: The origin pose of the relative frame, given in the absolute frame, as a StateSE3 or np.ndarray.
+ :param se3_array: The SE3 array in the relative frame.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+ :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.StateSE3Index`.
+ """
+
if isinstance(origin, StateSE3):
origin_array = origin.array
t_origin = origin.point_3d.array
@@ -137,6 +160,8 @@ def convert_relative_to_absolute_se3_array(
R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
else:
raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.ndim >= 1
assert se3_array.shape[-1] == len(StateSE3Index)
# Extract relative positions and orientations
@@ -161,6 +186,13 @@ def convert_relative_to_absolute_se3_array(
def convert_absolute_to_relative_points_3d_array(
origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the absolute frame to the relative frame.
+
+ :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the absolute frame.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+    :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
if isinstance(origin, StateSE3):
t_origin = origin.point_3d.array
@@ -172,6 +204,34 @@ def convert_absolute_to_relative_points_3d_array(
else:
raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+ assert points_3d_array.ndim >= 1
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
# Translate points to origin frame, then rotate to body frame
relative_points = (points_3d_array - t_origin) @ R_origin
return relative_points
+
+
+def convert_relative_to_absolute_points_3d_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the relative frame to the absolute frame.
+
+ :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+ :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
+ if isinstance(origin, StateSE3):
+ origin_array = origin.array
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ origin_array = origin
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix
+    absolute_points = points_3d_array @ R.T + origin_array[StateSE3Index.XYZ]
+ return absolute_points
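With the origin handling above, the two point conversions are exact inverses of each other (unlike the Euler-angle approximation in the SE3 pose functions). A quick round-trip check, assuming `d123` is importable and `StateSE3` accepts the keyword constructor used elsewhere in this series:

```python
import numpy as np

from d123.geometry import StateSE3
from d123.geometry.transform.transform_se3 import (
    convert_absolute_to_relative_points_3d_array,
    convert_relative_to_absolute_points_3d_array,
)

origin = StateSE3(x=1.0, y=2.0, z=0.0, roll=0.0, pitch=0.0, yaw=np.pi / 2)
points_abs = np.array([[2.0, 2.0, 0.0], [1.0, 3.0, 1.0]])

# absolute -> relative -> absolute recovers the original points exactly.
points_rel = convert_absolute_to_relative_points_3d_array(origin, points_abs)
points_back = convert_relative_to_absolute_points_3d_array(origin, points_rel)
assert np.allclose(points_back, points_abs)
```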
From be6629dcf48fee58f7de2d357a836bad7a947da0 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 25 Aug 2025 21:25:07 +0200
Subject: [PATCH 023/145] Fix wrong import of shapely.
---
d123/geometry/occupancy_map.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index b28348e7..097709b9 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -4,7 +4,7 @@
import numpy as np
import numpy.typing as npt
-import shapely.vectorized
+import shapely
from shapely.geometry.base import BaseGeometry
from shapely.strtree import STRtree
From 91fd900bc9bc3551c5f97b3b2a75018664045f35 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Tue, 26 Aug 2025 14:32:04 +0200
Subject: [PATCH 024/145] Add some tests for transform functions (#44)
---
.../av2/av2_data_converter.py | 2 +-
d123/geometry/test/test_point.py | 2 -
d123/geometry/test/test_transform.py | 530 ++++++++++--------
3 files changed, 292 insertions(+), 242 deletions(-)
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index d57b4e52..f5e5e44a 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -35,7 +35,7 @@
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py
index d3141bbb..94162e93 100644
--- a/d123/geometry/test/test_point.py
+++ b/d123/geometry/test/test_point.py
@@ -7,8 +7,6 @@
from d123.geometry.geometry_index import Point3DIndex
from d123.geometry.point import Point3D
-# Point3D, Point3DIndex
-
class TestPoint2D(unittest.TestCase):
"""Unit tests for Point2D class."""
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
index 7377bc41..2151317d 100644
--- a/d123/geometry/test/test_transform.py
+++ b/d123/geometry/test/test_transform.py
@@ -1,239 +1,291 @@
-# import unittest
-# import numpy as np
-# import numpy.typing as npt
-
-# from d123.geometry.se import StateSE2, StateSE3
-# from d123.geometry.transform.transform_se2 import (
-# convert_absolute_to_relative_se2_array,
-# convert_absolute_to_relative_point_2d_array,
-# convert_relative_to_absolute_se2_array,
-# convert_relative_to_absolute_point_2d_array,
-# translate_se2,
-# translate_se2_array,
-# translate_se2_along_yaw,
-# translate_se2_array_along_yaw,
-# )
-# from d123.geometry.transform.transform_se3 import (
-# translate_se3_along_z,
-# translate_se3_along_y,
-# translate_se3_along_x,
-# translate_body_frame,
-# convert_absolute_to_relative_se3_array,
-# convert_relative_to_absolute_se3_array,
-# convert_absolute_to_relative_points_3d_array,
-# convert_relative_to_absolute_points_3d_array,
-# )
-# from d123.geometry.vector import Vector2D
-
-
-# class TestTransformSE2(unittest.TestCase):
-# def test_translate_se2(self) -> None:
-# pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=float))
-# translation: Vector2D = Vector2D(1.0, 1.0)
-# result: StateSE2 = translate_se2(pose, translation)
-# expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=float))
-# np.testing.assert_array_almost_equal(result.array, expected.array)
-
-# def test_translate_se2_array(self) -> None:
-# poses: npt.NDArray[np.float64] = np.array(
-# [[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float
-# )
-# translation: Vector2D = Vector2D(1.0, 1.0)
-# result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
-# expected: npt.NDArray[np.float64] = np.array(
-# [[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_translate_se2_along_yaw(self) -> None:
-# # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
-# pose: npt.NDArray[np.float64] = np.array([0.0, 0.0, np.pi / 2], dtype=float)
-# distance: float = 1.0
-# result: npt.NDArray[np.float64] = translate_se2_along_yaw(pose, distance)
-# expected: npt.NDArray[np.float64] = np.array([0.0, 1.0, np.pi / 2], dtype=float)
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_translate_se2_array_along_yaw(self) -> None:
-# poses: npt.NDArray[np.float64] = np.array(
-# [[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=float
-# )
-# distance: float = 1.0
-# result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, distance)
-# expected: npt.NDArray[np.float64] = np.array(
-# [[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_absolute_to_relative_se2_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
-# absolute_poses: npt.NDArray[np.float64] = np.array(
-# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(
-# reference, absolute_poses
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_relative_to_absolute_se2_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
-# relative_poses: npt.NDArray[np.float64] = np.array(
-# [[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(
-# reference, relative_poses
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_absolute_to_relative_point_2d_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
-# absolute_points: npt.NDArray[np.float64] = np.array(
-# [[2.0, 2.0], [0.0, 1.0]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(
-# reference, absolute_points
-# )
-# expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=float)
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_relative_to_absolute_point_2d_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array([1.0, 1.0, 0.0], dtype=float)
-# relative_points: npt.NDArray[np.float64] = np.array(
-# [[1.0, 1.0], [-1.0, 0.0]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(
-# reference, relative_points
-# )
-# expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=float)
-# np.testing.assert_array_almost_equal(result, expected)
-
-
-# class TestTransformSE3(unittest.TestCase):
-# def test_translate_se3_along_x(self) -> None:
-# pose: npt.NDArray[np.float64] = np.array(
-# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# distance: float = 1.0
-# result: npt.NDArray[np.float64] = translate_se3_along_x(pose, distance)
-# expected: npt.NDArray[np.float64] = np.array(
-# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_translate_se3_along_y(self) -> None:
-# pose: npt.NDArray[np.float64] = np.array(
-# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# distance: float = 1.0
-# result: npt.NDArray[np.float64] = translate_se3_along_y(pose, distance)
-# expected: npt.NDArray[np.float64] = np.array(
-# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_translate_se3_along_z(self) -> None:
-# pose: npt.NDArray[np.float64] = np.array(
-# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# distance: float = 1.0
-# result: npt.NDArray[np.float64] = translate_se3_along_z(pose, distance)
-# expected: npt.NDArray[np.float64] = np.array(
-# [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_translate_body_frame(self) -> None:
-# pose: npt.NDArray[np.float64] = np.array(
-# [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# translation: npt.NDArray[np.float64] = np.array([1.0, 0.0, 0.0], dtype=float)
-# result: npt.NDArray[np.float64] = translate_body_frame(pose, translation)
-# expected: npt.NDArray[np.float64] = np.array(
-# [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_absolute_to_relative_se3_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array(
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# absolute_poses: npt.NDArray[np.float64] = np.array(
-# [
-# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0],
-# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
-# ],
-# dtype=float,
-# )
-# result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(
-# reference, absolute_poses
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
-# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],
-# ],
-# dtype=float,
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_relative_to_absolute_se3_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array(
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# relative_poses: npt.NDArray[np.float64] = np.array(
-# [
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0],
-# [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0],
-# ],
-# dtype=float,
-# )
-# result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(
-# reference, relative_poses
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [
-# [2.0, 2.0, 2.0, 0.0, 0.0, 0.0, 1.0],
-# [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0],
-# ],
-# dtype=float,
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_absolute_to_relative_points_3d_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array(
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# absolute_points: npt.NDArray[np.float64] = np.array(
-# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(
-# reference, absolute_points
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-# def test_convert_relative_to_absolute_points_3d_array(self) -> None:
-# reference: npt.NDArray[np.float64] = np.array(
-# [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0], dtype=float
-# )
-# relative_points: npt.NDArray[np.float64] = np.array(
-# [[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=float
-# )
-# result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(
-# reference, relative_points
-# )
-# expected: npt.NDArray[np.float64] = np.array(
-# [[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=float
-# )
-# np.testing.assert_array_almost_equal(result, expected)
-
-
-# if __name__ == "__main__":
-# unittest.main()
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.transform.transform_se2 import (
+ convert_absolute_to_relative_point_2d_array,
+ convert_absolute_to_relative_se2_array,
+ convert_relative_to_absolute_point_2d_array,
+ convert_relative_to_absolute_se2_array,
+ translate_se2,
+ translate_se2_along_yaw,
+ translate_se2_array,
+ translate_se2_array_along_yaw,
+)
+from d123.geometry.transform.transform_se3 import (
+ convert_absolute_to_relative_points_3d_array,
+ convert_absolute_to_relative_se3_array,
+ convert_relative_to_absolute_points_3d_array,
+ convert_relative_to_absolute_se3_array,
+ translate_body_frame,
+ translate_se3_along_x,
+ translate_se3_along_y,
+ translate_se3_along_z,
+)
+from d123.geometry.vector import Vector2D, Vector3D
+
+
+class TestTransformSE2(unittest.TestCase):
+
+ def setUp(self):
+ self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+
+ def test_translate_se2(self) -> None:
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+ translation: Vector2D = Vector2D(1.0, 1.0)
+
+ result: StateSE2 = translate_se2(pose, translation)
+ expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_negative_translation(self) -> None:
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+ translation: Vector2D = Vector2D(-0.5, -1.5)
+ result: StateSE2 = translate_se2(pose, translation)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.5, 0.5, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_with_rotation(self) -> None:
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 4], dtype=np.float64))
+ translation: Vector2D = Vector2D(1.0, 0.0)
+ result: StateSE2 = translate_se2(pose, translation)
+ expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 4], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_array(self) -> None:
+ poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ translation: Vector2D = Vector2D(1.0, 1.0)
+ result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_translate_se2_array_zero_translation(self) -> None:
+ poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ translation: Vector2D = Vector2D(0.0, 0.0)
+ result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
+ expected: npt.NDArray[np.float64] = poses.copy()
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_translate_se2_along_yaw(self) -> None:
+ # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
+ vector: Vector2D = Vector2D(1.0, 0.0)
+ result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_yaw_backward(self) -> None:
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ vector: Vector2D = Vector2D(-1.0, 0.0)
+ result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_yaw_diagonal(self) -> None:
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
+ vector: Vector2D = Vector2D(1.0, 0.0)
+ result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(
+ np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_array_along_yaw(self) -> None:
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ vector: Vector2D = Vector2D(1.0, 0.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, vector)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_translate_se2_array_along_yaw_multiple_distances(self) -> None:
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
+ vector: Vector2D = Vector2D(2.0, 0.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, vector)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_se2_array(self) -> None:
+ origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None:
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_se2_array(self) -> None:
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_point_2d_array(self) -> None:
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None:
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_point_2d_array(self) -> None:
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+
+class TestTransformSE3(unittest.TestCase):
+ def test_translate_se3_along_x(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE3 = translate_se3_along_x(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_x_negative(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -0.5
+ result: StateSE3 = translate_se3_along_x(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE3 = translate_se3_along_y(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y_with_existing_position(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 2.5
+ result: StateSE3 = translate_se3_along_y(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE3 = translate_se3_along_z(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z_large_distance(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 10.0
+ result: StateSE3 = translate_se3_along_z(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_body_frame(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+ result: StateSE3 = translate_body_frame(pose, translation)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_body_frame_multiple_axes(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
+ result: StateSE3 = translate_body_frame(pose, translation)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_body_frame_zero_translation(self) -> None:
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ result: StateSE3 = translate_body_frame(pose, translation)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_convert_absolute_to_relative_se3_array(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array(
+ [
+ [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array(
+ [
+ [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
+ [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_se3_array(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array(
+ [
+ [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
+ [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array(
+ [
+ [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_points_3d_array(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_points_3d_array(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
+ np.testing.assert_array_almost_equal(result, expected)
+
+
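+# Assuming this module is importable as d123.geometry.test.test_transform (the
+# path suggested by the repository layout), the suite can also be run with the
+# standard unittest runner:
+#   python -m unittest d123.geometry.test.test_transform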
+if __name__ == "__main__":
+ unittest.main()
From 87f48bda37fbd97225f8b27649463ac7c88e5743 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 26 Aug 2025 16:01:35 +0200
Subject: [PATCH 025/145] Refactor transform se2 and se3. Rename important
functions for a consistent 2D/3D scheme. (#44)
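
The *_along_yaw helpers and translate_body_frame are renamed so the SE2 and
SE3 APIs share one body-frame naming scheme. Roughly (old -> new, names as
they appear in the diff below):

    translate_se2_along_yaw(pose, v)        -> translate_se2_along_body_frame(pose, v)
    translate_se2_array_along_yaw(poses, v) -> translate_se2_array_along_body_frame(poses, v)
    translate_body_frame(pose_se3, v3)      -> translate_se3_along_body_frame(pose_se3, v3)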
---
.../vehicle_state/vehicle_parameters.py | 30 +-
.../visualization/matplotlib/observation.py | 8 +-
d123/common/visualization/viser/utils_v2.py | 4 +-
.../waymo_map_utils/womp_boundary_utils.py | 6 +-
d123/geometry/bounding_box.py | 8 +
d123/geometry/occupancy_map.py | 11 +-
d123/geometry/rotation.py | 12 +-
d123/geometry/se.py | 6 +-
d123/geometry/test/test_transform.py | 328 +++++++++++++++++-
d123/geometry/transform/transform_se2.py | 96 ++---
d123/geometry/transform/transform_se3.py | 26 +-
notebooks/viz/bev_matplotlib.ipynb | 2 +-
notebooks/viz/video_example.ipynb | 319 +++++++++++++++++
13 files changed, 747 insertions(+), 109 deletions(-)
create mode 100644 notebooks/viz/video_example.ipynb
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 19ff334b..152b9382 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
from dataclasses import dataclass
-from d123.geometry import StateSE2, StateSE3, Vector2D
-from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
-from d123.geometry.transform.transform_se3 import translate_se3_along_x, translate_se3_along_z
+from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
+from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
# TODO: Add more vehicle parameters, potentially extend the parameters.
@@ -83,12 +83,13 @@ def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: Vehicl
:param vehicle_parameters: The vehicle parameters.
:return: The rear axle state.
"""
- return translate_se3_along_z(
- translate_se3_along_x(
- center_se3,
+ return translate_se3_along_body_frame(
+ center_se3,
+ Vector3D(
-vehicle_parameters.rear_axle_to_center_longitudinal,
+ 0,
+ -vehicle_parameters.rear_axle_to_center_vertical,
),
- -vehicle_parameters.rear_axle_to_center_vertical,
)
@@ -99,12 +100,13 @@ def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: Veh
:param vehicle_parameters: The vehicle parameters.
:return: The center state.
"""
- return translate_se3_along_x(
- translate_se3_along_z(
- rear_axle_se3,
+ return translate_se3_along_body_frame(
+ rear_axle_se3,
+ Vector3D(
+ vehicle_parameters.rear_axle_to_center_longitudinal,
+ 0,
vehicle_parameters.rear_axle_to_center_vertical,
),
- vehicle_parameters.rear_axle_to_center_longitudinal,
)
@@ -115,7 +117,7 @@ def center_se2_to_rear_axle_se2(center_se2: StateSE2, vehicle_parameters: Vehicl
:param vehicle_parameters: The vehicle parameters.
:return: The rear axle state in 2D.
"""
- return translate_se2_along_yaw(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0))
+ return translate_se2_along_body_frame(center_se2, Vector2D(-vehicle_parameters.rear_axle_to_center_longitudinal, 0))
def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: VehicleParameters) -> StateSE2:
@@ -125,4 +127,6 @@ def rear_axle_se2_to_center_se2(rear_axle_se2: StateSE2, vehicle_parameters: Veh
:param vehicle_parameters: The vehicle parameters.
:return: The center state in 2D.
"""
- return translate_se2_along_yaw(rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0))
+ return translate_se2_along_body_frame(
+ rear_axle_se2, Vector2D(vehicle_parameters.rear_axle_to_center_longitudinal, 0)
+ )
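
Note: the two chained axis translations above collapse into a single
body-frame call. A minimal equivalence sketch (helper and field names from
this hunk; dx/dz stand for the longitudinal and vertical offsets):

    # old: translate_se3_along_x(translate_se3_along_z(rear_axle_se3, dz), dx)
    # new: one call, same result for any orientation
    center_se3 = translate_se3_along_body_frame(rear_axle_se3, Vector3D(dx, 0, dz))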
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 6814f600..27a2fff1 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -28,7 +28,7 @@
from d123.dataset.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
from d123.geometry.geometry_index import StateSE2Index
-from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
+from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
from d123.geometry.vector import Vector2D
@@ -153,12 +153,10 @@ def add_bounding_box_to_ax(
if plot_config.marker_style is not None:
assert plot_config.marker_style in ["-", "^"], f"Unknown marker style: {plot_config.marker_style}"
if plot_config.marker_style == "-":
- center_se2 = (
- bounding_box.center if isinstance(bounding_box, BoundingBoxSE2) else bounding_box.center.state_se2
- )
+ center_se2 = bounding_box.center_se2
arrow = np.zeros((2, 2), dtype=np.float64)
arrow[0] = center_se2.point_2d.array
- arrow[1] = translate_se2_along_yaw(
+ arrow[1] = translate_se2_along_body_frame(
center_se2,
Vector2D(bounding_box.length / 2.0 + 0.5, 0.0),
).array[StateSE2Index.XY]
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index c88faf3e..6a747698 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -7,7 +7,7 @@
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
-from d123.geometry.transform.transform_se3 import translate_body_frame
+from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
@@ -31,7 +31,7 @@ def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.fl
bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64)
for idx, vec in corner_extent_factors.items():
vector_3d = Vector3D.from_array(bounding_box_extent * vec.array)
- corners[idx] = translate_body_frame(bounding_box.center, vector_3d).point_3d.array
+ corners[idx] = translate_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
return corners
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
index 99e63192..84fe34ed 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
+++ b/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
@@ -6,7 +6,7 @@
import shapely.geometry as geom
from d123.geometry import OccupancyMap2D, Point3D, Polyline3D, PolylineSE2, StateSE2, Vector2D
-from d123.geometry.transform.transform_se2 import translate_se2_along_yaw
+from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
from d123.geometry.utils.rotation_utils import normalize_angle
MAX_LANE_WIDTH = 25.0 # meters
@@ -64,7 +64,7 @@ def _collect_perpendicular_hits(
assert sign in [1.0, -1.0], "Sign must be either 1.0 (left) or -1.0 (right)"
# perp_start_point = translate_along_yaw(lane_query_se2, Vector2D(0.0, sign * PERP_START_OFFSET))
perp_start_point = lane_query_se2
- perp_end_point = translate_se2_along_yaw(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0))
+ perp_end_point = translate_se2_along_body_frame(lane_query_se2, Vector2D(0.0, sign * MAX_LANE_WIDTH / 2.0))
perp_linestring = geom.LineString([[perp_start_point.x, perp_start_point.y], [perp_end_point.x, perp_end_point.y]])
lane_linestring = occupancy_2d.geometries[occupancy_2d.id_to_idx[lane_token]]
@@ -261,7 +261,7 @@ def _get_default_boundary_point_3d(
lane_query_se2: StateSE2, lane_query_3d: Point3D, sign: float
) -> Point3D:
perp_boundary_distance = DEFAULT_LANE_WIDTH / 2.0
- boundary_point_se2 = translate_se2_along_yaw(
+ boundary_point_se2 = translate_se2_along_body_frame(
lane_query_se2, Vector2D(0.0, sign * perp_boundary_distance)
)
return Point3D(boundary_point_se2.x, boundary_point_se2.y, lane_query_3d.z)
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index dd211ca0..459e56bf 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -198,6 +198,14 @@ def center_se3(self) -> StateSE3:
"""
return self.center
+ @property
+ def center_se2(self) -> StateSE2:
+ """The center of the bounding box as a StateSE2 instance.
+
+ :return: The center of the bounding box as a StateSE2 instance.
+ """
+ return self.center_se3.state_se2
+
@property
def length(self) -> float:
"""The length of the bounding box along the x-axis in the local frame.
diff --git a/d123/geometry/occupancy_map.py b/d123/geometry/occupancy_map.py
index 097709b9..648ea91a 100644
--- a/d123/geometry/occupancy_map.py
+++ b/d123/geometry/occupancy_map.py
@@ -99,7 +99,16 @@ def query(
self,
geometry: Union[BaseGeometry, np.ndarray],
predicate: Optional[
- Literal["intersects", "within", "contains", "overlaps", "crosses", "touches", "covers", "covered_by"]
+ Literal[
+ "intersects",
+ "within",
+ "contains",
+ "overlaps",
+ "crosses",
+ "touches",
+ "covers",
+ "covered_by",
+ ]
] = None,
distance: Optional[float] = None,
) -> npt.NDArray[np.int64]:
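
A sketch of a call against this signature, reusing names from the
boundary-utils hunk above (the call itself does not appear in this diff):

    hit_indices = occupancy_2d.query(perp_linestring, predicate="intersects")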
diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py
index f98a601d..3fc4afa9 100644
--- a/d123/geometry/rotation.py
+++ b/d123/geometry/rotation.py
@@ -80,20 +80,16 @@ def yaw(self) -> float:
"""
return self._array[EulerAnglesIndex.YAW]
- @cached_property
+ @property
def array(self) -> npt.NDArray[np.float64]:
"""Converts the EulerAngles instance to a numpy array.
:return: A numpy array of shape (3,) containing the Euler angles [roll, pitch, yaw], indexed by \
:class:`~d123.geometry.EulerAnglesIndex`.
"""
- array = np.zeros(len(EulerAnglesIndex), dtype=np.float64)
- array[EulerAnglesIndex.ROLL] = self.roll
- array[EulerAnglesIndex.PITCH] = self.pitch
- array[EulerAnglesIndex.YAW] = self.yaw
- return array
+ return self._array
- @property
+ @cached_property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the Euler angles.
NOTE: The rotation order is intrinsic Z-Y'-X'' (yaw-pitch-roll).
@@ -206,7 +202,7 @@ def array(self) -> npt.NDArray[np.float64]:
"""
return self._array
- @property
+ @cached_property
def pyquaternion(self) -> pyquaternion.Quaternion:
"""Returns the pyquaternion.Quaternion representation of the quaternion.
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 378128ca..1eb021c6 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -256,7 +256,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
:return: A 3x3 numpy array representing the rotation matrix.
"""
- return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES]).rotation_matrix
+ return self.euler_angles.rotation_matrix
@property
def transformation_matrix(self) -> npt.NDArray[np.float64]:
@@ -270,6 +270,10 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]:
transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]
return transformation_matrix
+ @cached_property
+ def euler_angles(self) -> EulerAngles:
+ return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES])
+
@property
def quaternion(self) -> npt.NDArray[np.float64]:
"""Returns the quaternion (w, x, y, z) representation of the state's orientation.
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
index 2151317d..f126e33a 100644
--- a/d123/geometry/test/test_transform.py
+++ b/d123/geometry/test/test_transform.py
@@ -3,6 +3,7 @@
import numpy as np
import numpy.typing as npt
+from d123.geometry.geometry_index import EulerAnglesIndex, Point2DIndex, Point3DIndex, StateSE2Index, StateSE3Index
from d123.geometry.se import StateSE2, StateSE3
from d123.geometry.transform.transform_se2 import (
convert_absolute_to_relative_point_2d_array,
@@ -10,16 +11,16 @@
convert_relative_to_absolute_point_2d_array,
convert_relative_to_absolute_se2_array,
translate_se2,
- translate_se2_along_yaw,
+ translate_se2_along_body_frame,
translate_se2_array,
- translate_se2_array_along_yaw,
+ translate_se2_array_along_body_frame,
)
from d123.geometry.transform.transform_se3 import (
convert_absolute_to_relative_points_3d_array,
convert_absolute_to_relative_se3_array,
convert_relative_to_absolute_points_3d_array,
convert_relative_to_absolute_se3_array,
- translate_body_frame,
+ translate_se3_along_body_frame,
translate_se3_along_x,
translate_se3_along_y,
translate_se3_along_z,
@@ -72,21 +73,21 @@ def test_translate_se2_along_yaw(self) -> None:
# Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
vector: Vector2D = Vector2D(1.0, 0.0)
- result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
def test_translate_se2_along_yaw_backward(self) -> None:
pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
vector: Vector2D = Vector2D(-1.0, 0.0)
- result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
def test_translate_se2_along_yaw_diagonal(self) -> None:
pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
vector: Vector2D = Vector2D(1.0, 0.0)
- result: StateSE2 = translate_se2_along_yaw(pose, vector)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
expected: StateSE2 = StateSE2.from_array(
np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
)
@@ -95,14 +96,14 @@ def test_translate_se2_along_yaw_diagonal(self) -> None:
def test_translate_se2_array_along_yaw(self) -> None:
poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
vector: Vector2D = Vector2D(1.0, 0.0)
- result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, vector)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, vector)
expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
np.testing.assert_array_almost_equal(result, expected)
def test_translate_se2_array_along_yaw_multiple_distances(self) -> None:
poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
vector: Vector2D = Vector2D(2.0, 0.0)
- result: npt.NDArray[np.float64] = translate_se2_array_along_yaw(poses, vector)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, vector)
expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
np.testing.assert_array_almost_equal(result, expected)
@@ -150,6 +151,11 @@ def test_convert_relative_to_absolute_point_2d_array(self) -> None:
class TestTransformSE3(unittest.TestCase):
+
+ def setUp(self):
+ self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+ self.num_consistency_tests = 10 # Number of random test cases for consistency checks
+
def test_translate_se3_along_x(self) -> None:
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 1.0
@@ -195,24 +201,84 @@ def test_translate_se3_along_z_large_distance(self) -> None:
def test_translate_body_frame(self) -> None:
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
- result: StateSE3 = translate_body_frame(pose, translation)
+ result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
def test_translate_body_frame_multiple_axes(self) -> None:
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
- result: StateSE3 = translate_body_frame(pose, translation)
+ result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
def test_translate_body_frame_zero_translation(self) -> None:
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- result: StateSE3 = translate_body_frame(pose, translation)
+ result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
+ def test_translate_body_frame_consistency(self) -> None:
+
+ for _ in range(self.num_consistency_tests):
+ # Generate random parameters
+ x_distance: float = np.random.uniform(-10.0, 10.0)
+ y_distance: float = np.random.uniform(-10.0, 10.0)
+ z_distance: float = np.random.uniform(-10.0, 10.0)
+
+ start_x: float = np.random.uniform(-5.0, 5.0)
+ start_y: float = np.random.uniform(-5.0, 5.0)
+ start_z: float = np.random.uniform(-5.0, 5.0)
+
+ start_roll: float = np.random.uniform(-np.pi, np.pi)
+ start_pitch: float = np.random.uniform(-np.pi, np.pi)
+ start_yaw: float = np.random.uniform(-np.pi, np.pi)
+
+ original_pose: StateSE3 = StateSE3.from_array(
+ np.array(
+ [
+ start_x,
+ start_y,
+ start_z,
+ start_roll,
+ start_pitch,
+ start_yaw,
+ ],
+ dtype=np.float64,
+ )
+ )
+
+ # x-axis translation
+ translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64))
+ result_body_frame_x: StateSE3 = translate_se3_along_body_frame(original_pose, translation_x)
+ result_axis_x: StateSE3 = translate_se3_along_x(original_pose, x_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal)
+
+ # y-axis translation
+ translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64))
+ result_body_frame_y: StateSE3 = translate_se3_along_body_frame(original_pose, translation_y)
+ result_axis_y: StateSE3 = translate_se3_along_y(original_pose, y_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal)
+
+ # z-axis translation
+ translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64))
+ result_body_frame_z: StateSE3 = translate_se3_along_body_frame(original_pose, translation_z)
+ result_axis_z: StateSE3 = translate_se3_along_z(original_pose, z_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal)
+
+ # all axes translation
+ translation_all: Vector3D = Vector3D.from_array(
+ np.array([x_distance, y_distance, z_distance], dtype=np.float64)
+ )
+ result_body_frame_all: StateSE3 = translate_se3_along_body_frame(original_pose, translation_all)
+ intermediate_pose: StateSE3 = translate_se3_along_x(original_pose, x_distance)
+ intermediate_pose = translate_se3_along_y(intermediate_pose, y_distance)
+ result_axis_all: StateSE3 = translate_se3_along_z(intermediate_pose, z_distance)
+ np.testing.assert_array_almost_equal(
+ result_body_frame_all.array, result_axis_all.array, decimal=self.decimal
+ )
+
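+ # NOTE: Chaining the x/y/z helpers in the test above matches the single
+ # body-frame call because each step leaves the rotation untouched, so the
+ # offsets combine linearly: R @ [dx, dy, dz] = dx*R[:,0] + dy*R[:,1] + dz*R[:,2].
+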
def test_convert_absolute_to_relative_se3_array(self) -> None:
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
absolute_poses: npt.NDArray[np.float64] = np.array(
@@ -287,5 +353,245 @@ def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
np.testing.assert_array_almost_equal(result, expected)
+class TestTransformConsistency(unittest.TestCase):
+ def setUp(self):
+ self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+ self.num_consistency_tests = 10 # Number of random test cases for consistency checks
+
+ self.max_pose_xyz = 100.0
+ self.min_random_poses = 1
+ self.max_random_poses = 20
+
+ def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate a random SE2 pose"""
+ random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
+ random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size) # yaw angles
+ return random_se2_array
+
+ def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate a random SE3 poses"""
+ random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
+ random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
+ random_se3_array[:, StateSE3Index.EULER_ANGLES] = np.random.uniform(
+ -np.pi, np.pi, (size, len(EulerAnglesIndex))
+ )
+ return random_se3_array
+
+ def test_se2_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original poses"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_poses = self._get_random_se2_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses)
+
+ np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+ def test_se2_points_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original points"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random absolute points
+ num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY]
+
+ # Convert absolute -> relative -> absolute
+ relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+
+ np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+ def test_se2_points_consistency(self) -> None:
+ """Test whether SE2 point and pose conversions are consistent"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_se2 = self._get_random_se2_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2)
+ relative_points = convert_absolute_to_relative_point_2d_array(
+ reference, absolute_se2[..., StateSE2Index.XY]
+ )
+ np.testing.assert_array_almost_equal(
+ relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal
+ )
+
+ recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2)
+ absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+ np.testing.assert_array_almost_equal(
+ recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
+ )
+
+ def test_se3_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original poses"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_poses = self._get_random_se3_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_poses = convert_absolute_to_relative_se3_array(reference, absolute_poses)
+ recovered_absolute = convert_relative_to_absolute_se3_array(reference, relative_poses)
+
+ np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+ def test_se3_points_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original points"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
+
+ # Generate random absolute points
+ num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_points = self._get_random_se3_array(num_points)[:, StateSE3Index.XYZ]
+
+ # Convert absolute -> relative -> absolute
+ relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+
+ np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+ def test_se3_points_consistency(self) -> None:
+ """Test whether SE3 point and pose conversions are consistent"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_se3 = self._get_random_se3_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_se3 = convert_absolute_to_relative_se3_array(reference, absolute_se3)
+ relative_points = convert_absolute_to_relative_points_3d_array(
+ reference, absolute_se3[..., StateSE3Index.XYZ]
+ )
+ np.testing.assert_array_almost_equal(
+ relative_se3[..., StateSE3Index.XYZ], relative_points, decimal=self.decimal
+ )
+
+ recovered_absolute_se3 = convert_relative_to_absolute_se3_array(reference, relative_se3)
+ absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ np.testing.assert_array_almost_equal(
+ recovered_absolute_se3[..., StateSE3Index.XYZ], absolute_points, decimal=self.decimal
+ )
+
+ def test_se2_se3_translation_along_body_consistency(self) -> None:
+ """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw)
+
+ pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0])
+ pose_se3 = StateSE3.from_array(
+ np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64)
+ )
+
+ # Test translation along x-axis
+ dx = np.random.uniform(-5.0, 5.0)
+ translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0))
+ translated_se3_x = translate_se3_along_x(pose_se3, dx)
+
+ np.testing.assert_array_almost_equal(
+ translated_se2_x.array[StateSE2Index.XY], translated_se3_x.array[StateSE3Index.XY], decimal=self.decimal
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_x.array[StateSE2Index.YAW],
+ translated_se3_x.array[StateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Test translation along y-axis
+ dy = np.random.uniform(-5.0, 5.0)
+ translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy))
+ translated_se3_y = translate_se3_along_y(pose_se3, dy)
+
+ np.testing.assert_array_almost_equal(
+ translated_se2_y.array[StateSE2Index.XY], translated_se3_y.array[StateSE3Index.XY], decimal=self.decimal
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_y.array[StateSE2Index.YAW],
+ translated_se3_y.array[StateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Test translation along x- and y-axis
+ dx = np.random.uniform(-5.0, 5.0)
+ dy = np.random.uniform(-5.0, 5.0)
+ translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy))
+ translated_se3_xy = translate_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0))
+ np.testing.assert_array_almost_equal(
+ translated_se2_xy.array[StateSE2Index.XY],
+ translated_se3_xy.array[StateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_xy.array[StateSE2Index.YAW],
+ translated_se3_xy.array[StateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ def test_se2_se3_point_conversion_consistency(self) -> None:
+ """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 reference poses
+ x = np.random.uniform(-10.0, 10.0)
+ y = np.random.uniform(-10.0, 10.0)
+ yaw = np.random.uniform(-np.pi, np.pi)
+
+ reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+ reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+ # Generate 2D points and embed them in 3D with z=0
+ num_points = np.random.randint(1, 8)
+ points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
+ points_3d = np.column_stack([points_2d, np.zeros(num_points)])
+
+ # Convert using SE2 functions
+ relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
+ absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
+
+ # Convert using SE3 functions
+ relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
+ absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
+
+ # Check that SE2 and SE3 results are consistent (ignoring z-component)
+ np.testing.assert_array_almost_equal(
+ relative_2d,
+ relative_3d[..., Point3DIndex.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_2d_recovered,
+ absolute_3d_recovered[..., Point3DIndex.XY],
+ decimal=self.decimal,
+ )
+ # Z-component should remain zero
+ np.testing.assert_array_almost_equal(
+ relative_3d[..., Point3DIndex.Z],
+ np.zeros(num_points),
+ decimal=self.decimal,
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_3d_recovered[..., Point3DIndex.Z],
+ np.zeros(num_points),
+ decimal=self.decimal,
+ )
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py
index c1f33cd4..6331bd16 100644
--- a/d123/geometry/transform/transform_se2.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -30,6 +30,8 @@ def convert_absolute_to_relative_se2_array(
else:
raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")
+ assert len(StateSE2Index) == state_se2_array.shape[-1]
+
rotate_rad = -origin_array[StateSE2Index.YAW]
cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
R_inv = np.array([[cos, -sin], [sin, cos]])
@@ -41,14 +43,14 @@ def convert_absolute_to_relative_se2_array(
return state_se2_rel
-def convert_absolute_to_relative_point_2d_array(
- origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
+def convert_relative_to_absolute_se2_array(
+ origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
- """Converts an absolute 2D point array from global to relative coordinates.
-
+ """
+ Converts a StateSE2 array from relative to global coordinates.
:param origin: origin pose of relative coords system
- :param point_2d_array: array of 2D points with (x,y) in last dim
- :return: 2D points array in relative coordinates
+ :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
+ :return: SE2 coords array in absolute coordinates
"""
if isinstance(origin, StateSE2):
origin_array = origin.array
@@ -58,24 +60,30 @@ def convert_absolute_to_relative_point_2d_array(
else:
raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")
- rotate_rad = -origin_array[StateSE2Index.YAW]
+ assert len(StateSE2Index) == state_se2_array.shape[-1]
+
+ rotate_rad = origin_array[StateSE2Index.YAW]
cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
R = np.array([[cos, -sin], [sin, cos]])
- point_2d_rel = point_2d_array - origin_array[..., StateSE2Index.XY]
- point_2d_rel = point_2d_rel @ R.T
+ state_se2_abs = np.zeros_like(state_se2_array, dtype=np.float64)
+ state_se2_abs[..., StateSE2Index.XY] = state_se2_array[..., StateSE2Index.XY] @ R.T
+ state_se2_abs[..., StateSE2Index.XY] += origin_array[..., StateSE2Index.XY]
+ state_se2_abs[..., StateSE2Index.YAW] = normalize_angle(
+ state_se2_array[..., StateSE2Index.YAW] + origin_array[..., StateSE2Index.YAW]
+ )
- return point_2d_rel
+ return state_se2_abs
-def convert_relative_to_absolute_se2_array(
- origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
+def convert_absolute_to_relative_point_2d_array(
+ origin: Union[StateSE2, npt.NDArray[np.float64]], point_2d_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
- """
- Converts a StateSE2 array from relative to global coordinates.
+ """Converts an absolute 2D point array from global to relative coordinates.
+
:param origin: origin pose of relative coords system
- :param state_se2_array: array of SE2 states with (x,y,θ) in last dim
- :return: SE2 coords array in absolute coordinates
+ :param point_2d_array: array of 2D points with (x,y) in last dim
+ :return: 2D points array in relative coordinates
"""
if isinstance(origin, StateSE2):
origin_array = origin.array
@@ -85,15 +93,14 @@ def convert_relative_to_absolute_se2_array(
else:
raise TypeError(f"Expected StateSE2 or np.ndarray, got {type(origin)}")
- rotate_rad = origin_array[StateSE2Index.YAW]
+ rotate_rad = -origin_array[StateSE2Index.YAW]
cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
- R = np.array([[cos, -sin], [sin, cos]])
+ R = np.array([[cos, -sin], [sin, cos]], dtype=np.float64)
- state_se2_rel = state_se2_array + origin_array
- state_se2_rel[..., StateSE2Index.XY] = state_se2_rel[..., StateSE2Index.XY] @ R.T
- state_se2_rel[..., StateSE2Index.YAW] = normalize_angle(state_se2_rel[..., StateSE2Index.YAW])
+ point_2d_rel = point_2d_array - origin_array[..., StateSE2Index.XY]
+ point_2d_rel = point_2d_rel @ R.T
- return state_se2_rel
+ return point_2d_rel
def convert_relative_to_absolute_point_2d_array(
@@ -110,7 +117,7 @@ def convert_relative_to_absolute_point_2d_array(
rotate_rad = origin_array[StateSE2Index.YAW]
cos, sin = np.cos(rotate_rad), np.sin(rotate_rad)
- R = np.array([[cos, -sin], [sin, cos]])
+ R = np.array([[cos, -sin], [sin, cos]], dtype=np.float64)
point_2d_abs = point_2d_array @ R.T
point_2d_abs = point_2d_abs + origin_array[..., StateSE2Index.XY]
@@ -137,30 +144,13 @@ def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: V
:param translation: 2D translation vector
:return: translated SE2 array
"""
+ assert len(StateSE2Index) == state_se2_array.shape[-1]
result = state_se2_array.copy()
result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY]
return result
-def translate_se2_along_yaw(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
- """Translate a single SE2 state along its local coordinate frame.
-
- :param state_se2: SE2 state to translate
- :param translation: 2D translation in local frame (x: forward, y: left)
- :return: translated SE2 state
- """
- yaw = state_se2.array[StateSE2Index.YAW]
- cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
-
- # Transform translation from local to global frame
- global_translation = np.array(
- [translation.x * cos_yaw - translation.y * sin_yaw, translation.x * sin_yaw + translation.y * cos_yaw]
- )
-
- return translate_se2(state_se2, Vector2D.from_array(global_translation))
-
-
-def translate_se2_array_along_yaw(
+def translate_se2_array_along_body_frame(
state_se2_array: npt.NDArray[np.float64], translation: Vector2D
) -> npt.NDArray[np.float64]:
"""Translate an array of SE2 states along their respective local coordinate frames.
@@ -169,15 +159,29 @@ def translate_se2_array_along_yaw(
:param translation: 2D translation in local frame (x: forward, y: left)
:return: translated SE2 array
"""
+ assert len(StateSE2Index) == state_se2_array.shape[-1]
result = state_se2_array.copy()
yaws = state_se2_array[..., StateSE2Index.YAW]
cos_yaws, sin_yaws = np.cos(yaws), np.sin(yaws)
# Transform translation from local to global frame for each state
- global_translation_x = translation.x * cos_yaws - translation.y * sin_yaws
- global_translation_y = translation.x * sin_yaws + translation.y * cos_yaws
+ # Create rotation matrices for each state
+ R = np.stack([cos_yaws, -sin_yaws, sin_yaws, cos_yaws], axis=-1).reshape(*cos_yaws.shape, 2, 2)
- result[..., StateSE2Index.X] += global_translation_x
- result[..., StateSE2Index.Y] += global_translation_y
+ # Transform translation vector from local to global frame
+ translation_vector = translation.array[Vector2DIndex.XY] # [x, y]
+ global_translation = np.einsum("...ij,...j->...i", R, translation_vector)
+
+ result[..., StateSE2Index.XY] += global_translation
return result
+
+
+def translate_se2_along_body_frame(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
+ """Translate a single SE2 state along its local coordinate frame.
+
+ :param state_se2: SE2 state to translate
+ :param translation: 2D translation in local frame (x: forward, y: left)
+ :return: translated SE2 state
+ """
+ return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False)
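For reference, the batched body-frame translation above reduces to rotating the local offset by each state's yaw and adding the result to the position. A minimal standalone NumPy sketch of the same math, using plain [x, y, yaw] arrays instead of the d123 types (this layout is an assumption for illustration only):

    import numpy as np

    def translate_along_body_frame(states, offset):
        """states: (..., 3) array of [x, y, yaw]; offset: (2,) local [forward, left]."""
        yaw = states[..., 2]
        cos, sin = np.cos(yaw), np.sin(yaw)
        # One 2x2 rotation matrix per state, stacked along the last two axes.
        R = np.stack([cos, -sin, sin, cos], axis=-1).reshape(*yaw.shape, 2, 2)
        out = states.copy()
        out[..., :2] += np.einsum("...ij,j->...i", R, np.asarray(offset, dtype=np.float64))
        return out

    poses = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]])
    # The first pose moves along world +X; the second (facing 90 degrees) along world +Y.
    print(translate_along_body_frame(poses, [1.0, 0.0]))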
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index 3a04b5b2..1412d61a 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -25,10 +25,8 @@ def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
z_axis = R[:, 2]
state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.X] += distance * z_axis[0]
- state_se3_array[StateSE3Index.Y] += distance * z_axis[1]
- state_se3_array[StateSE3Index.Z] += distance * z_axis[2]
- return StateSE3.from_array(state_se3_array)
+ state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
@@ -43,10 +41,8 @@ def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
y_axis = R[:, 1]
state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.X] += distance * y_axis[0]
- state_se3_array[StateSE3Index.Y] += distance * y_axis[1]
- state_se3_array[StateSE3Index.Z] += distance * y_axis[2]
- return StateSE3.from_array(state_se3_array)
+ state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
@@ -61,14 +57,11 @@ def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
x_axis = R[:, 0]
state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.X] += distance * x_axis[0]
- state_se3_array[StateSE3Index.Y] += distance * x_axis[1]
- state_se3_array[StateSE3Index.Z] += distance * x_axis[2]
+ state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
- return StateSE3.from_array(state_se3_array)
-
-def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
+def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
"""Translates a SE3 state along a vector in the body frame.
:param state_se3: The SE3 state to translate.
@@ -77,14 +70,11 @@ def translate_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
"""
R = state_se3.rotation_matrix
-
- # Transform to world frame
world_translation = R @ vector_3d.array
state_se3_array = state_se3.array.copy()
state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ]
-
- return StateSE3.from_array(state_se3_array)
+ return StateSE3.from_array(state_se3_array, copy=False)
def convert_absolute_to_relative_se3_array(
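All three translate_se3_along_{x,y,z} helpers now follow the same pattern: take one column of the rotation matrix and move the position along it. A standalone sketch with a plain (t, R) pose representation (illustrative only, not the d123 API):

    import numpy as np

    def translate_along_axis(t, R, distance, axis):
        """Translate position t along body axis (0=x, 1=y, 2=z) of rotation R (3x3)."""
        return t + distance * R[:, axis]

    # 90-degree rotation about Z: the body x-axis points along world +Y.
    c, s = np.cos(np.pi / 2), np.sin(np.pi / 2)
    R = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    print(translate_along_axis(np.zeros(3), R, 2.0, axis=0))  # ~[0., 2., 0.]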
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 32328cbf..3627a511 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -343,7 +343,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "d123_dev",
+ "display_name": "d123",
"language": "python",
"name": "python3"
},
diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb
new file mode 100644
index 00000000..f9202dd7
--- /dev/null
+++ b/notebooks/viz/video_example.ipynb
@@ -0,0 +1,319 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n",
+ "from d123.dataset.scene.scene_filter import SceneFilter\n",
+ "\n",
+ "from d123.common.multithreading.worker_sequential import Sequential\n",
+ "from d123.common.datatypes.sensor.camera import CameraType"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from d123.geometry import Point2D\n",
+ "import numpy as np\n",
+ "\n",
+ "import torch\n",
+ "\n",
+ "from d123.geometry.polyline import Polyline2D"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# split = \"nuplan_private_test\"\n",
+ "# log_names = [\"2021.09.29.17.35.58_veh-44_00066_00432\"]\n",
+ "\n",
+ "\n",
+ "# splits = [\"wopd_train\"]\n",
+ "# splits = [\"carla\"]\n",
+ "splits = [\"nuplan_private_test\"]\n",
+ "# splits = [\"av2-sensor-mini_train\"]\n",
+ "# log_names = None\n",
+ "\n",
+ "\n",
+ "log_names = None\n",
+ "scene_tokens = None\n",
+ "\n",
+ "scene_filter = SceneFilter(\n",
+ " split_names=splits,\n",
+ " log_names=log_names,\n",
+ " scene_tokens=scene_tokens,\n",
+ " duration_s=20,\n",
+ " history_s=0.0,\n",
+ " timestamp_threshold_s=20,\n",
+ " shuffle=True,\n",
+ " camera_types=[CameraType.CAM_F0],\n",
+ ")\n",
+ "scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n",
+ "worker = Sequential()\n",
+ "# worker = RayDistributed()\n",
+ "scenes = scene_builder.get_scenes(scene_filter, worker)\n",
+ "\n",
+ "print(f\"Found {len(scenes)} scenes\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import List, Optional, Tuple\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "from d123.common.visualization.matplotlib.camera import add_camera_ax\n",
+ "from d123.geometry import Point2D\n",
+ "from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
+ "from d123.common.visualization.color.config import PlotConfig\n",
+ "from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG\n",
+ "from d123.common.visualization.matplotlib.observation import (\n",
+ " add_box_detections_to_ax,\n",
+ " add_default_map_on_ax,\n",
+ " add_ego_vehicle_to_ax,\n",
+ " add_traffic_lights_to_ax,\n",
+ ")\n",
+ "from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n",
+ "from d123.dataset.maps.abstract_map import AbstractMap\n",
+ "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n",
+ "from d123.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n",
+ "from d123.dataset.maps.map_datatypes import MapLayer\n",
+ "from d123.dataset.scene.abstract_scene import AbstractScene\n",
+ "\n",
+ "\n",
+ "import shapely.geometry as geom\n",
+ "\n",
+ "\n",
+ "def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:\n",
+ "\n",
+ " ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n",
+ " box_detections = scene.get_box_detections_at_iteration(iteration)\n",
+ "\n",
+ " point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
+ " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n",
+ "\n",
+ " add_box_detections_to_ax(ax, box_detections)\n",
+ " add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n",
+ "\n",
+ " zoom = 1.0\n",
+ " ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)\n",
+ " ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)\n",
+ "\n",
+ " ax.set_aspect(\"equal\", adjustable=\"box\")\n",
+ " return ax\n",
+ "\n",
+ "\n",
+ "def plot_scene_at_iteration(\n",
+ " scene: AbstractScene, iteration: int = 0, radius: float = 80\n",
+ ") -> Tuple[plt.Figure, plt.Axes]:\n",
+ "\n",
+ " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n",
+ "\n",
+ " camera = scene.get_camera_at_iteration(iteration=iteration, camera_type=CameraType.CAM_F0)\n",
+ " add_camera_ax(ax[0], camera)\n",
+ "\n",
+ " _plot_scene_on_ax(ax[1], scene, iteration, radius)\n",
+ " for ax_ in ax:\n",
+ " ax_.set_xticks([])\n",
+ " ax_.set_yticks([])\n",
+ " fig.subplots_adjust(wspace=-0.5) # Make the border between axes super small\n",
+ " fig.tight_layout()\n",
+ " return fig, ax\n",
+ "\n",
+ "\n",
+ "scene_index = 6\n",
+ "\n",
+ "iteration = 99\n",
+ "fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=35)\n",
+ "plt.show()\n",
+ "\n",
+ "camera = scenes[scene_index].get_camera_at_iteration(\n",
+ " iteration=iteration, camera_type=CameraType.CAM_F0\n",
+ ")\n",
+ "\n",
+ "plt.imshow(camera.image, cmap=\"gray\", vmin=0, vmax=255)\n",
+ "# # fig.savefig(f\"/home/daniel/scene_{scene_index}_iteration_1.pdf\", dpi=300, bbox_inches=\"tight\")\n",
+ "\n",
+ "scenes[scene_index].log_name"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from pathlib import Path\n",
+ "from typing import Union\n",
+ "\n",
+ "from matplotlib import animation\n",
+ "from tqdm import tqdm\n",
+ "\n",
+ "\n",
+ "def render_scene_animation(\n",
+ " scene: AbstractScene,\n",
+ " output_path: Union[str, Path],\n",
+ " start_idx: int = 0,\n",
+ " end_idx: Optional[int] = None,\n",
+ " step: int = 5,\n",
+ " fps: float = 2.0,\n",
+ " dpi: int = 300,\n",
+ " format: str = \"gif\",\n",
+ " radius: float = 35,\n",
+ ") -> None:\n",
+ " assert format in [\"mp4\", \"gif\"], \"Format must be either 'mp4' or 'gif'.\"\n",
+ " output_path = Path(output_path)\n",
+ " output_path.mkdir(parents=True, exist_ok=True)\n",
+ "\n",
+ " scene.open()\n",
+ "\n",
+ " if end_idx is None:\n",
+ " end_idx = scene.get_number_of_iterations()\n",
+ " end_idx = min(end_idx, scene.get_number_of_iterations())\n",
+ " fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n",
+ " gs = fig.add_gridspec(1, 2, width_ratios=[6, 1])\n",
+ " ax[0].set_position(gs[0].get_position(fig))\n",
+ " ax[1].set_position(gs[1].get_position(fig))\n",
+ "\n",
+ " def update(i):\n",
+ " ax[0].clear()\n",
+ " ax[1].clear()\n",
+ " for ax_ in ax:\n",
+ " ax_.set_xticks([])\n",
+ " ax_.set_yticks([])\n",
+ " _plot_scene_on_ax(ax[1], scene, i, radius)\n",
+ " camera = scene.get_camera_at_iteration(iteration=i, camera_type=CameraType.CAM_F0)\n",
+ " add_camera_ax(ax[0], camera)\n",
+ " fig.subplots_adjust(wspace=-0.33, hspace=0.0)\n",
+ " plt.subplots_adjust(left=0.01, right=0.99, top=0.99, bottom=0.01) # Remove all margins\n",
+ " pbar.update(1)\n",
+ "\n",
+ " frames = list(range(start_idx, end_idx, step))\n",
+ " pbar = tqdm(total=len(frames), desc=f\"Rendering {scene.log_name} as {format}\")\n",
+ " ani = animation.FuncAnimation(fig, update, frames=frames, repeat=False)\n",
+ "\n",
+ " ani.save(output_path / f\"{scene.log_name}_{scene.token}.{format}\", writer=\"ffmpeg\", fps=fps, dpi=dpi)\n",
+ " plt.close(fig)\n",
+ " scene.close()\n",
+ "\n",
+ "\n",
+ "render_scene_animation(scenes[scene_index], output_path=\"/home/daniel/scene_renders\", format=\"gif\", dpi=100, end_idx=200)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings\n",
+ "from d123.dataset.maps.gpkg.gpkg_map import GPKGMap\n",
+ "\n",
+ "\n",
+ "map_api: GPKGMap = scenes[scene_index].map_api\n",
+ "\n",
+ "drivable_polygons = map_api._gpd_dataframes[MapLayer.LANE]\n",
+ "\n",
+ "\n",
+ "\n",
+ "linear_rings = get_road_edge_linear_rings(drivable_polygons.geometry.tolist())\n",
+ "rings_lengths = [ring.length for ring in linear_rings]\n",
+ "max_length_idx = np.argmax(rings_lengths)\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "\n",
+ "size = 16\n",
+ "fig, ax = plt.subplots(figsize=(size, size))\n",
+ "\n",
+ "for idx, ring in enumerate(linear_rings):\n",
+ " if idx == max_length_idx:\n",
+ " ax.plot(*ring.xy, color=\"black\", linewidth=2, label=\"Longest Road Edge\")\n",
+ " else:\n",
+ " ax.plot(*ring.xy)\n",
+ "\n",
+ "\n",
+ "ax.set_aspect(\"equal\", adjustable=\"box\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "7",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "d123",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
From 48b11a96113a73b703d1a2fa52200864f374bdff Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Wed, 27 Aug 2025 16:31:55 +0800
Subject: [PATCH 026/145] Finish preprocess detection script
---
.../kitti_360/kitti_360_data_converter.py | 63 +++---
.../kitti_360/kitti_360_helper.py | 41 ++--
.../kitti_360/preprocess_detection.py | 189 ++++++++++++++++++
.../default_dataset_conversion.yaml | 2 +-
jbwang_test2.py | 11 +-
5 files changed, 253 insertions(+), 53 deletions(-)
create mode 100644 d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 6433ca89..03e5bd37 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -9,6 +9,7 @@
from typing import Any, Dict, Final, List, Optional, Tuple, Union
import numpy as np
+import pickle
from collections import defaultdict
import datetime
import hashlib
@@ -27,14 +28,12 @@
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3Index
-from d123.common.geometry.vector import Vector3D, Vector3DIndex
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label
+from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
KITTI360_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = True
@@ -74,6 +73,9 @@
DIR_3D_BBOX: PATH_3D_BBOX_ROOT / "train",
}
+D123_DEVKIT_ROOT = Path(os.environ["D123_DEVKIT_ROOT"])
+PREPROCESS_DETECTION_DIR = D123_DEVKIT_ROOT / "d123" / "dataset" / "dataset_specific" / "kitti_360" / "detection_preprocess"
+
def create_token(input_data: str) -> str:
# TODO: Refactor this function.
# TODO: Add a general function to create tokens from arbitrary data.
@@ -316,7 +318,15 @@ def _readYAMLFile(fileName):
def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
metadata: Dict[LiDARType, LiDARMetadata] = {}
+ extrinsic = get_lidar_extrinsic()
+ metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
+ lidar_type=LiDARType.LIDAR_TOP,
+ lidar_index=Kitti360LidarIndex,
+ extrinsic=extrinsic,
+ )
+ return metadata
+def get_lidar_extrinsic() -> np.ndarray:
cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
if not cam2pose_txt.exists():
raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
@@ -336,13 +346,7 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
extrinsic = cam2pose @ np.linalg.inv(cam2velo)
-
- metadata[LiDARType.LIDAR_TOP] = LiDARMetadata(
- lidar_type=LiDARType.LIDAR_TOP,
- lidar_index=Kitti360LidarIndex,
- extrinsic=extrinsic,
- )
- return metadata
+ return extrinsic
def _write_recording_table(
log_name: str,
@@ -405,11 +409,10 @@ def _write_recording_table(
#TODO: Synchronize all other sequences
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
# unix
- # default using velodyne timestamps,if not available, use camera timestamps
ts_files = [
- PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt",
+ PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
]
for ts_file in ts_files:
if ts_file.exists():
@@ -531,16 +534,13 @@ def _extract_detections(
dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
-
- # lidra_data_all = []
- # for index in range(ts_len):
- # lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{index:010d}.bin"
- # if not lidar_full_path.exists():
- # logging.warning(f"LiDAR file not found for frame {index}: {lidar_full_path}")
- # continue
- # lidar_data = np.fromfile(lidar_full_path, dtype=np.float32)
- # lidar_data = lidar_data.reshape(-1, 4)[:, :3] # Keep only x, y, z coordinates
- # lidra_data_all.append(lidar_data)
+ detection_preprocess_path = PREPROCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl"
+ if detection_preprocess_path.exists():
+ with open(detection_preprocess_path, "rb") as f:
+ detection_preprocess_result = pickle.load(f)
+ records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["records"]}
+ else:
+ detection_preprocess_result = None
for child in root:
semanticIdKITTI = int(child.find('semanticId').text)
@@ -552,14 +552,12 @@ def _extract_detections(
#static object
if obj.timestamp == -1:
- # first filter by radius
- obj.filter_by_radius(ego_states_xyz,radius=50.0)
- # then filter by pointcloud
- for frame in obj.valid_radius_frames:
- # TODO in the future, now is too slow because cpu in the server is not free
- # or using config?
- # lidar_data = lidra_data_all[frame]
- # if obj.box_visible_in_point_cloud(lidar_data):
+ if detection_preprocess_result is None:
+ obj.filter_by_radius(ego_states_xyz,radius=50.0)
+ else:
+ obj.load_detection_preprocess(records_dict)
+ for record in obj.valid_frames["records"]:
+ frame = record["timestamp"]
detections_states[frame].append(obj.get_state_array())
detections_velocity[frame].append([0.0, 0.0, 0.0])
detections_tokens[frame].append(str(obj.globalID))
@@ -606,6 +604,11 @@ def _extract_detections(
#TODO lidar extraction now only velo
def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
+
+ # NOTE: special case for sequence 2013_05_28_drive_0002_sync, which has no LiDAR data before frame 4391
+ if log_name == "2013_05_28_drive_0002_sync" and idx <= 4390:
+ return {LiDARType.LIDAR_TOP: None}
+
lidar: Optional[str] = None
lidar_full_path = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{idx:010d}.bin"
if lidar_full_path.exists():
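Based on the loading code above, the preprocessed pickle consumed by _extract_detections is expected to have the following shape (hypothetical values; the field names are taken from the converter and helper):

    payload = {
        "log_name": "2013_05_28_drive_0000_sync",
        "records": [
            {
                "global_id": 12345,  # local2global(semanticId, instanceId)
                "records": [
                    {"timestamp": 17, "points_in_box": 212},
                    {"timestamp": 18, "points_in_box": 198},
                ],
            },
        ],
    }
    # _extract_detections then indexes the payload per object:
    records_dict = {item["global_id"]: item for item in payload["records"]}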
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index 7edcd6af..76e3c9e0 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -1,13 +1,11 @@
import numpy as np
from collections import defaultdict
-
+from typing import Dict, Optional, Any, List
from scipy.linalg import polar
from scipy.spatial.transform import Rotation as R
-from d123.common.geometry.base import StateSE3
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE3
-from d123.common.geometry.transform.se3 import get_rotation_matrix
+from d123.geometry import BoundingBoxSE3, StateSE3
from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
DEFAULT_ROLL = 0.0
@@ -51,7 +49,6 @@ def __init__(self):
# the window that contains the bbox
self.start_frame = -1
self.end_frame = -1
- self.valid_radius_frames = []
# timestamp of the bbox (-1 if static)
self.timestamp = -1
@@ -92,6 +89,9 @@ def parseBbox(self, child):
self.label = child.find('label').text
self.globalID = local2global(self.semanticId, self.instanceId)
+
+ self.valid_frames = {"global_id": self.globalID, "records": []}
+
self.parseVertices(child)
self.parse_scale_rotation()
@@ -119,11 +119,6 @@ def parse_scale_rotation(self):
self.yaw = yaw
self.pitch = pitch
self.roll = roll
-
- # self.pose = np.eye(4, dtype=np.float64)
- # self.pose[:3, :3] = self.Rm
- # self.pose[:3, 3] = self.T
- # self.w2e = np.linalg.inv(self.pose)
def get_state_array(self):
center = StateSE3(
@@ -140,16 +135,17 @@ def get_state_array(self):
return bounding_box_se3.array
def filter_by_radius(self,ego_state_xyz,radius=50.0):
- # first stage of detection, used to filter out detections by radius
-
- for index in range(len(ego_state_xyz)):
- ego_state = ego_state_xyz[index]
- distance = np.linalg.norm(ego_state[:3] - self.T)
- if distance <= radius:
- self.valid_radius_frames.append(index)
''' First stage of detection: filter out detections by radius. '''
+ d = np.linalg.norm(ego_state_xyz - self.T[None, :], axis=1)
+ idxs = np.where(d <= radius)[0]
+ for idx in idxs:
+ self.valid_frames["records"].append({
+ "timestamp": idx,
+ "points_in_box": None,
+ })
def box_visible_in_point_cloud(self, points):
- # points: (N,3) , box: (8,3)
''' points: (N,3), box: (8,3) '''
box = self.vertices
O, A, B, C = box[0], box[1], box[2], box[5]
OA = A - O
@@ -159,4 +155,11 @@ def box_visible_in_point_cloud(self, points):
mask = (np.dot(O, OA) < POA) & (POA < np.dot(A, OA)) & \
(np.dot(O, OB) < POB) & (POB < np.dot(B, OB)) & \
(np.dot(O, OC) < POC) & (POC < np.dot(C, OC))
- return True if np.sum(mask) > 100 else False
\ No newline at end of file
+
+ points_in_box = np.sum(mask)
+ visible = bool(points_in_box > 50)
+ return visible, points_in_box
+
+ def load_detection_preprocess(self, records_dict: Dict[int, Any]):
+ if self.globalID in records_dict:
+ self.valid_frames["records"] = records_dict[self.globalID]["records"]
\ No newline at end of file
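The box_visible_in_point_cloud check above projects every LiDAR point onto three edge directions of the oriented box (OA, OB, OC built from vertices O, A, B, C) and keeps points whose projections fall strictly between the projections of the corresponding endpoints. A self-contained sketch of the same idea on an axis-aligned unit cube (the vertex indexing here is illustrative, not KITTI-360's):

    import numpy as np

    def points_in_obb(points, O, A, B, C):
        """Boolean mask of points strictly inside the box spanned by edges OA, OB, OC."""
        mask = np.ones(len(points), dtype=bool)
        for end in (A, B, C):
            e = end - O
            proj = points @ e
            mask &= (np.dot(O, e) < proj) & (proj < np.dot(end, e))
        return mask

    O = np.zeros(3)
    A, B, C = np.eye(3)  # unit cube with edges along x, y, z
    pts = np.array([[0.5, 0.5, 0.5], [2.0, 0.0, 0.0]])
    print(points_in_obb(pts, O, A, B, C))  # [ True False]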
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
new file mode 100644
index 00000000..e45e76d9
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -0,0 +1,189 @@
+"""
+This script precomputes static detection records for KITTI-360:
+ - Stage 1: radius filtering using ego positions (from poses.txt).
+ - Stage 2: LiDAR visibility check to fill per-frame point counts.
+It writes a pickle containing, for each static object, all feasible frames and
+their point counts to avoid recomputation in later pipelines.
+We have precomputed and saved the pickles for all training logs; you can either
+download them or run this script to generate them.
+"""
+
+from __future__ import annotations
+import os
+import pickle
+import logging
+from pathlib import Path
+from typing import Dict, List, Tuple, Optional, Any
+from collections import defaultdict
+
+import numpy as np
+import numpy.typing as npt
+import xml.etree.ElementTree as ET
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+DIR_3D_RAW = "data_3d_raw"
+DIR_3D_BBOX = "data_3d_bboxes"
+DIR_POSES = "data_poses"
+
+PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
+PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
+PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
+
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label
+from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_lidar_extrinsic
+
+def _bbox_xml_path(log_name: str) -> Path:
+ return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
+
+def _lidar_frame_path(log_name: str, frame_idx: int) -> Path:
+ return PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data" / f"{frame_idx:010d}.bin"
+
+def _load_lidar_xyz(filepath: Path) -> np.ndarray:
+ """Load one LiDAR frame and return Nx3 xyz."""
+ arr = np.fromfile(filepath, dtype=np.float32)
+ return arr.reshape(-1, 4)[:, :3]
+
+def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]:
+ """Parse XML and collect static objects with valid class names."""
+ xml_path = _bbox_xml_path(log_name)
+ if not xml_path.exists():
+ raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ objs: List[KITTI360Bbox3D] = []
+ for child in root:
+ sem_id = int(child.find("semanticId").text)
+ name = kittiId2label[sem_id].name
+ timestamp = int(child.find('timestamp').text) # -1 for static objects
+ if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1:
+ continue
+ obj = KITTI360Bbox3D()
+ obj.parseBbox(child)
+ objs.append(obj)
+ return objs
+
+def _collect_ego_states(log_name: str, length: int) -> npt.NDArray[np.float64]:
+ """Load ego states from poses.txt."""
+
+ pose_file = PATH_POSES_ROOT / log_name / "poses.txt"
+ if not pose_file.exists():
+ raise FileNotFoundError(f"Pose file not found: {pose_file}")
+
+ poses = np.loadtxt(pose_file)
+ poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
+
+ pose_idx = 0
+ poses_time_len = len(poses_time)
+
+ ego_states = []
+
+ for time_idx in range(length):
+ while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < time_idx:
+ pose_idx += 1
+ pos = pose_idx
+ state_item = np.eye(4)
+ r00, r01, r02 = poses[pos, 1:4]
+ r10, r11, r12 = poses[pos, 5:8]
+ r20, r21, r22 = poses[pos, 9:12]
+ R_mat = np.array([[r00, r01, r02],
+ [r10, r11, r12],
+ [r20, r21, r22]], dtype=np.float64)
+ R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3]
+ ego_state_xyz = np.array([
+ poses[pos, 4],
+ poses[pos, 8],
+ poses[pos, 12],
+ ])
+
+ state_item[:3, :3] = R_mat_cali
+ state_item[:3, 3] = ego_state_xyz
+ ego_states.append(state_item)
+
+ return np.array(ego_states) # [N,4,4]
+
+
+def process_detection(
+ log_name: str,
+ radius_m: float = 50.0,
+ output_dir: Optional[Path] = None,
+) -> None:
+ """
+ Precompute static detection filtering:
+ 1) filter by ego-centered radius over all frames
+ 2) filter by LiDAR point cloud visibility
+ Save per-frame static detections to a pickle to avoid recomputation.
+ """
+
+ lidar_dir = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data"
+ if not lidar_dir.exists():
+ raise FileNotFoundError(f"LiDAR data folder not found: {lidar_dir}")
+ ts_len = len(list(lidar_dir.glob("*.bin")))
+ logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames")
+
+ # 1) Parse static objects from XML
+ static_objs = _collect_static_objects(log_name)
+ logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}")
+
+ # 2) Filter by ego-centered radius
+ ego_states = _collect_ego_states(log_name, ts_len)
+ logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}")
+ for obj in static_objs:
+ obj.filter_by_radius(ego_states[:, :3, 3], radius_m)
+
+ # 3) Filter by LiDAR point cloud visibility
+ lidar_extrinsic = get_lidar_extrinsic()
+ for time_idx in range(ts_len):
+ logging.info(f"[preprocess] {log_name}: t={time_idx}")
+ lidar_path = _lidar_frame_path(log_name, time_idx)
+ lidar_xyz = _load_lidar_xyz(lidar_path)
+
+ # lidar to pose
+ lidar_h = np.concatenate((lidar_xyz, np.ones((lidar_xyz.shape[0], 1), dtype=lidar_xyz.dtype)), axis=1)
+ lidar_in_imu = lidar_h @ lidar_extrinsic.T
+ lidar_in_imu = lidar_in_imu[:,:3]
+
+ # pose to world
+ lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3,:3].T + ego_states[time_idx][:3,3]
+
+ for obj in static_objs:
+ if not any(record["timestamp"] == time_idx for record in obj.valid_frames["records"]):
+ continue
+ visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world)
+ if not visible:
+ obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != time_idx]
+ else:
+ for record in obj.valid_frames["records"]:
+ if record["timestamp"] == time_idx:
+ record["points_in_box"] = points_in_box
+ break
+
+ # 4) Save pickle
+ records: List[Dict[str, Any]] = []
+ for obj in static_objs:
+ records.append(obj.valid_frames)
+ if output_dir is None:
+ output_dir = PATH_3D_BBOX_ROOT / "preprocessed"
+ output_dir.mkdir(parents=True, exist_ok=True)
+ out_path = output_dir / f"{log_name}_detection_preprocessed.pkl"
+ payload = {
+ "log_name": log_name,
+ "records": records
+ }
+ with open(out_path, "wb") as f:
+ pickle.dump(payload, f)
+ logging.info(f"[preprocess] saved: {out_path}")
+
+if __name__ == "__main__":
+ import argparse
+ logging.basicConfig(level=logging.INFO)
+ parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detection filters")
+ parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync")
+ parser.add_argument("--radius", type=float, default=60.0)
+ parser.add_argument("--out", type=Path, default=None, help="output directory for pkl")
+ args = parser.parse_args()
+ process_detection(
+ log_name=args.log_name,
+ radius_m=args.radius,
+ output_dir=args.out,
+ )
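With the defaults above, the preprocessing can also be driven from Python instead of the command line; a minimal sketch (the output directory here is hypothetical; leaving it as None falls back to data_3d_bboxes/preprocessed):

    from pathlib import Path

    from d123.dataset.dataset_specific.kitti_360.preprocess_detection import process_detection

    # Equivalent to: --log_name 2013_05_28_drive_0000_sync --radius 50.0 --out /tmp/kitti360_preprocessed
    process_detection(
        log_name="2013_05_28_drive_0000_sync",
        radius_m=50.0,
        output_dir=Path("/tmp/kitti360_preprocessed"),
    )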
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 2c474fe8..52915f13 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -22,4 +22,4 @@ defaults:
- kitti360_dataset
force_log_conversion: True
-force_map_conversion: True
+force_map_conversion: False
diff --git a/jbwang_test2.py b/jbwang_test2.py
index f9748db5..183df813 100644
--- a/jbwang_test2.py
+++ b/jbwang_test2.py
@@ -213,12 +213,17 @@ def get_rotation_matrix(roll,pitch,yaw):
[0.0, -1.0, 0.0],
[0.0, 0.0, -1.0]], dtype=np.float64)
R_mat = R_mat @ calib
+ from d123.geometry.rotation import EulerAngles
if idx <= 300:
# print("R_mat",R_mat)
+
new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll
+ R = EulerAngles.from_array(np.array([new_roll, new_pitch, new_yaw])).rotation_matrix
+ # print("R from yaw_pitch_roll",R)
+ print(R_mat - R)
# new_yaw,new_pitch,new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False)
- print("new",new_roll,new_pitch,new_yaw)
- print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度
- roll, pitch, yaw = oxts_data[3:6]
+ # print("new",new_roll,new_pitch,new_yaw)
+ # print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度
+ # roll, pitch, yaw = oxts_data[3:6]
# print("true",get_rotation_matrix(roll,pitch,yaw))
# print("new",roll,pitch,yaw)
\ No newline at end of file
From 6dd5de2494b0afc38d30fe324efbdf46d2298a92 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Wed, 27 Aug 2025 20:38:45 +0200
Subject: [PATCH 027/145] A few changes for refactoring and readability.
---
d123/__init__.py | 2 +-
d123/common/utils/mixin.py | 11 +++--
.../map/opendrive/opendrive_converter.py | 0
.../map/opendrive/parser/reference.py | 7 ++-
.../map/opendrive/utils/lane_helper.py | 6 +--
d123/geometry/geometry_index.py | 6 +--
d123/geometry/se.py | 22 ++++++++-
d123/geometry/transform/__init__.py | 15 ++++++
d123/geometry/transform/transform_se2.py | 49 +++++++++----------
d123/geometry/transform/transform_se3.py | 7 ---
docs/geometry.rst | 34 ++++++++++++-
pyproject.toml | 6 ++-
12 files changed, 112 insertions(+), 53 deletions(-)
delete mode 100644 d123/dataset/conversion/map/opendrive/opendrive_converter.py
diff --git a/d123/__init__.py b/d123/__init__.py
index b1a19e32..6526deb4 100644
--- a/d123/__init__.py
+++ b/d123/__init__.py
@@ -1 +1 @@
-__version__ = "0.0.5"
+__version__ = "0.0.7"
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
index 9290242b..252a4eee 100644
--- a/d123/common/utils/mixin.py
+++ b/d123/common/utils/mixin.py
@@ -1,7 +1,5 @@
from __future__ import annotations
-import copy as pycopy
-
import numpy as np
import numpy.typing as npt
@@ -9,6 +7,11 @@
class ArrayMixin:
"""Abstract base class for geometric entities."""
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayMixin:
+ """Create an instance from a NumPy array."""
+ raise NotImplementedError
+
@property
def array(self) -> npt.NDArray[np.float64]:
"""The array representation of the geometric entity."""
@@ -43,6 +46,4 @@ def tolist(self) -> list:
def copy(self) -> ArrayMixin:
"""Return a copy of the object with a copied array."""
- obj = pycopy.copy(self)
- obj.array = self.array.copy()
- return obj
+ return self.__class__.from_array(self.array, copy=True)
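With this change, copy() lives once on the mixin and each subclass only needs to provide from_array. A reduced sketch of the contract (a simplified stand-in, not the actual d123 classes):

    import numpy as np

    class ArraySketch:
        def __init__(self, array):
            self._array = array

        @classmethod
        def from_array(cls, array, copy=True):
            return cls(array.copy() if copy else array)

        @property
        def array(self):
            return self._array

        def copy(self):
            # Same pattern as ArrayMixin.copy(): route through from_array with copy=True.
            return self.__class__.from_array(self.array, copy=True)

    v = ArraySketch(np.array([1.0, 2.0]))
    w = v.copy()
    w.array[0] = 9.0
    print(v.array)  # [1. 2.] -- the copy does not alias the original buffer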
diff --git a/d123/dataset/conversion/map/opendrive/opendrive_converter.py b/d123/dataset/conversion/map/opendrive/opendrive_converter.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/dataset/conversion/map/opendrive/parser/reference.py
index 5fe9211b..bc13537d 100644
--- a/d123/dataset/conversion/map/opendrive/parser/reference.py
+++ b/d123/dataset/conversion/map/opendrive/parser/reference.py
@@ -60,7 +60,7 @@ def interpolate_se2(self, s: float, t: float = 0.0, lane_section_end: bool = Fal
s = self.length
else:
raise ValueError(
- f"s={s} is beyond the end of the plan view (length={self.length}) with tolerance={TOLERANCE}."
+ f"PlanView: s={s} is beyond the end of the plan view (length={self.length}) with tolerance={TOLERANCE}."
)
# Find the geometry segment containing s
@@ -131,6 +131,11 @@ def _find_polynomial(s: float, polynomials: List[Polynomial], lane_section_end:
out_polynomial = polynomial
break
+ # s_values = np.array([poly.s for poly in polynomials])
+ # side = "left" if lane_section_end else "right"
+ # poly_idx = np.searchsorted(s_values, s, side=side) - 1
+ # poly_idx = int(np.clip(poly_idx, 0, len(polynomials) - 1))
+ # return polynomials[poly_idx]
return out_polynomial
def interpolate_se2(self, s: float, t: float = 0.0, lane_section_end: bool = False) -> npt.NDArray[np.float64]:
diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
index edd05423..a21bd625 100644
--- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
+++ b/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
@@ -50,9 +50,9 @@ def type(self) -> str:
def _s_positions(self) -> npt.NDArray[np.float64]:
length = self.s_range[1] - self.s_range[0]
_s_positions = np.linspace(
- self.s_range[0],
- self.s_range[1],
- int(np.ceil(length / self.interpolation_step_size)) + 1,
+ start=self.s_range[0],
+ stop=self.s_range[1],
+ num=int(np.ceil(length / self.interpolation_step_size)) + 1,
endpoint=True,
dtype=np.float64,
)
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index e5c61154..5f56abd9 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -117,10 +117,6 @@ def XY(cls) -> slice:
def XYZ(cls) -> slice:
return slice(cls.X, cls.Z + 1)
- @classproperty
- def ROTATION_XYZ(cls) -> slice:
- return slice(cls.ROLL, cls.YAW + 1)
-
@classproperty
def EULER_ANGLES(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
@@ -208,7 +204,7 @@ def STATE_SE3(cls) -> slice:
return slice(cls.X, cls.YAW + 1)
@classproperty
- def ROTATION_XYZ(cls) -> slice:
+ def EULER_ANGLES(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
@classproperty
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 1eb021c6..2bc56da9 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -274,6 +274,18 @@ def transformation_matrix(self) -> npt.NDArray[np.float64]:
def euler_angles(self) -> EulerAngles:
return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES])
+ @property
+ def quaternion_se3(self) -> QuaternionSE3:
+ """Returns the QuaternionSE3 representation of the state.
+
+ :return: A QuaternionSE3 instance representing the quaternion.
+ """
+ quaternion_se3_array = np.zeros(len(QuaternionSE3Index), dtype=np.float64)
+ quaternion_se3_array[QuaternionSE3Index.XYZ] = self.array[StateSE3Index.XYZ]
+ quaternion_se3_array[QuaternionSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles).array
+
+ return QuaternionSE3.from_array(quaternion_se3_array)
+
@property
def quaternion(self) -> npt.NDArray[np.float64]:
"""Returns the quaternion (w, x, y, z) representation of the state's orientation.
@@ -299,7 +311,7 @@ def __matmul__(self, other: StateSE3) -> StateSE3:
return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
-class QuaternionSE3:
+class QuaternionSE3(ArrayMixin):
"""Class representing a quaternion in SE3 space.
TODO: Implement and replace StateSE3.
@@ -438,3 +450,11 @@ def quaternion(self) -> Quaternion:
:return: A Quaternion instance representing the quaternion.
"""
return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION])
+
+ @property
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 rotation matrix representation of the state's orientation.
+
+ :return: A 3x3 numpy array representing the rotation matrix.
+ """
+ return self.quaternion.rotation_matrix
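The new quaternion_se3 property packs the [x, y, z] position together with a (w, x, y, z) quaternion into one array. A quick standalone check of the euler-to-quaternion-to-matrix round trip using SciPy (assuming an extrinsic xyz roll-pitch-yaw convention, which may differ from d123's EulerAngles):

    import numpy as np
    from scipy.spatial.transform import Rotation

    roll, pitch, yaw = 0.1, -0.2, 0.7
    rot = Rotation.from_euler("xyz", [roll, pitch, yaw])
    qx, qy, qz, qw = rot.as_quat()     # SciPy stores quaternions as (x, y, z, w)
    print(np.array([qw, qx, qy, qz]))  # reordered to (w, x, y, z) as in the docstring above
    # Quaternion -> rotation matrix reproduces the original rotation.
    np.testing.assert_allclose(Rotation.from_quat([qx, qy, qz, qw]).as_matrix(), rot.as_matrix(), atol=1e-12)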
diff --git a/d123/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py
index e69de29b..96088028 100644
--- a/d123/geometry/transform/__init__.py
+++ b/d123/geometry/transform/__init__.py
@@ -0,0 +1,15 @@
+from d123.geometry.transform.transform_se2 import (
+ convert_absolute_to_relative_se2_array,
+ convert_relative_to_absolute_se2_array,
+ translate_se2_along_body_frame,
+ translate_se2_along_x,
+ translate_se2_along_y,
+)
+from d123.geometry.transform.transform_se3 import (
+ convert_absolute_to_relative_se3_array,
+ convert_relative_to_absolute_se3_array,
+ translate_se3_along_body_frame,
+ translate_se3_along_x,
+ translate_se3_along_y,
+ translate_se3_along_z,
+)
diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py
index 6331bd16..faaa3587 100644
--- a/d123/geometry/transform/transform_se2.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -8,8 +8,6 @@
from d123.geometry.utils.rotation_utils import normalize_angle
from d123.geometry.vector import Vector2D
-# TODO: Refactor 2D and 3D transform functions in a more consistent and general way.
-
def convert_absolute_to_relative_se2_array(
origin: Union[StateSE2, npt.NDArray[np.float64]], state_se2_array: npt.NDArray[np.float64]
@@ -125,31 +123,6 @@ def convert_relative_to_absolute_point_2d_array(
return point_2d_abs
-def translate_se2(state_se2: StateSE2, translation: Vector2D) -> StateSE2:
- """Translate a single SE2 state by a 2D vector.
-
- :param state_se2: SE2 state to translate
- :param translation: 2D translation vector
- :return: translated SE2 state
- """
- translated_xy = state_se2.array[StateSE2Index.XY] + translation.array[Vector2DIndex.XY]
- return StateSE2(translated_xy[0], translated_xy[1], state_se2.array[StateSE2Index.YAW])
-
-
-def translate_se2_array(state_se2_array: npt.NDArray[np.float64], translation: Vector2D) -> npt.NDArray[np.float64]:
- """Translate an array of SE2 states by a 2D vector.
-
- :param state_se2_array: array of SE2 states, indexed by \
- :class:`~d123.geometry.geometry_index.StateSE2Index`, in last dim
- :param translation: 2D translation vector
- :return: translated SE2 array
- """
- assert len(StateSE2Index) == state_se2_array.shape[-1]
- result = state_se2_array.copy()
- result[..., StateSE2Index.XY] += translation.array[Vector2DIndex.XY]
- return result
-
-
def translate_se2_array_along_body_frame(
state_se2_array: npt.NDArray[np.float64], translation: Vector2D
) -> npt.NDArray[np.float64]:
@@ -185,3 +158,25 @@ def translate_se2_along_body_frame(state_se2: StateSE2, translation: Vector2D) -
:return: translated SE2 state
"""
return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False)
+
+
+def translate_se2_along_x(state_se2: StateSE2, distance: float) -> StateSE2:
+ """Translate a single SE2 state along its local X-axis.
+
+ :param state_se2: SE2 state to translate
+ :param distance: distance to translate along the local X-axis
+ :return: translated SE2 state
+ """
+ translation = Vector2D.from_array(np.array([distance, 0.0], dtype=np.float64))
+ return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False)
+
+
+def translate_se2_along_y(state_se2: StateSE2, distance: float) -> StateSE2:
+ """Translate a single SE2 state along its local Y-axis.
+
+ :param state_se2: SE2 state to translate
+ :param distance: distance to translate along the local Y-axis
+ :return: translated SE2 state
+ """
+ translation = Vector2D.from_array(np.array([0.0, distance], dtype=np.float64))
+ return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False)
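A quick sanity check that the new axis helpers agree with the body-frame function they wrap (import paths as re-exported by the transform __init__ above; StateSE2 and Vector2D are assumed importable from d123.geometry, as the docs pages suggest):

    import numpy as np

    from d123.geometry import StateSE2, Vector2D
    from d123.geometry.transform import translate_se2_along_body_frame, translate_se2_along_x

    pose = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
    a = translate_se2_along_x(pose, 1.0)
    b = translate_se2_along_body_frame(pose, Vector2D(1.0, 0.0))
    np.testing.assert_array_almost_equal(a.array, b.array)  # both land at (0, 1, pi/2)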
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index 1412d61a..abed7552 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -7,7 +7,6 @@
from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
from d123.geometry.rotation import EulerAngles
from d123.geometry.utils.rotation_utils import (
- get_rotation_matrices_from_euler_array,
get_rotation_matrix_from_euler_array,
normalize_angle,
)
@@ -109,12 +108,6 @@ def convert_absolute_to_relative_se3_array(
# Vectorized relative position calculation
rel_positions = (abs_positions - t_origin) @ R_origin
- # Get rotation matrices for all absolute orientations
- R_abs = get_rotation_matrices_from_euler_array(abs_euler_angles)
-
- # Compute relative rotations: R_rel = R_origin^T @ R_abs
- np.transpose(R_origin) @ R_abs
-
# Convert back to Euler angles (this may need a custom function)
# For now, using simple subtraction as approximation (this is incorrect for general rotations)
origin_euler = origin_array[StateSE3Index.EULER_ANGLES]
diff --git a/docs/geometry.rst b/docs/geometry.rst
index 957db541..61b3f65d 100644
--- a/docs/geometry.rst
+++ b/docs/geometry.rst
@@ -17,8 +17,8 @@ Vectors
.. autoclass:: d123.geometry.Vector3D()
-Super Euclidean States
-~~~~~~~~~~~~~~~~~~~~~~
+Special Euclidean Group
+~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: d123.geometry.StateSE2()
.. autoclass:: d123.geometry.StateSE3()
@@ -52,6 +52,36 @@ Indexing Enums
.. autoclass:: d123.geometry.Corners3DIndex()
+Transformations
+---------------
+
+Transformations in 2D
+~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se2_array
+
+.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se2_array
+
+.. autofunction:: d123.geometry.transform.translate_se2_along_body_frame
+
+.. autofunction:: d123.geometry.transform.translate_se2_along_x
+
+.. autofunction:: d123.geometry.transform.translate_se2_along_y
+
+
+Transformations in 3D
+~~~~~~~~~~~~~~~~~~~~~
+.. autofunction:: d123.geometry.transform.convert_absolute_to_relative_se3_array
+
+.. autofunction:: d123.geometry.transform.convert_relative_to_absolute_se3_array
+
+.. autofunction:: d123.geometry.transform.translate_se3_along_body_frame
+
+.. autofunction:: d123.geometry.transform.translate_se3_along_x
+
+.. autofunction:: d123.geometry.transform.translate_se3_along_y
+
+.. autofunction:: d123.geometry.transform.translate_se3_along_z
+
Occupancy Map
-------------
.. autoclass:: d123.geometry.OccupancyMap2D()
diff --git a/pyproject.toml b/pyproject.toml
index ad02829a..dda67ef7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,7 +13,7 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
]
name = "d123"
-version = "v0.0.6"
+version = "v0.0.7"
authors = [{ name = "Daniel Dauner", email = "daniel.dauner@gmail.com" }]
description = "TODO"
readme = "README.md"
@@ -64,6 +64,10 @@ dependencies = [
"viser",
]
+[project.scripts]
+d123-viser = "d123.script.run_viser:main"
+
+
[project.optional-dependencies]
dev = [
"black",
From 6ed982db5553e929d7963d5d08c49d256a8ea4f1 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Wed, 27 Aug 2025 21:00:19 +0200
Subject: [PATCH 028/145] Add more transformation tests.
---
d123/geometry/test/test_transform.py | 444 +++++++++++++++++++++++----
1 file changed, 382 insertions(+), 62 deletions(-)
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
index f126e33a..89161153 100644
--- a/d123/geometry/test/test_transform.py
+++ b/d123/geometry/test/test_transform.py
@@ -10,9 +10,9 @@
convert_absolute_to_relative_se2_array,
convert_relative_to_absolute_point_2d_array,
convert_relative_to_absolute_se2_array,
- translate_se2,
translate_se2_along_body_frame,
- translate_se2_array,
+ translate_se2_along_x,
+ translate_se2_along_y,
translate_se2_array_along_body_frame,
)
from d123.geometry.transform.transform_se3 import (
@@ -33,58 +33,73 @@ class TestTransformSE2(unittest.TestCase):
def setUp(self):
self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
- def test_translate_se2(self) -> None:
- pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
- translation: Vector2D = Vector2D(1.0, 1.0)
-
- result: StateSE2 = translate_se2(pose, translation)
- expected: StateSE2 = StateSE2.from_array(np.array([2.0, 3.0, 0.0], dtype=np.float64))
+ def test_translate_se2_along_x(self) -> None:
+ """Tests translating a SE2 state along the X-axis."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_negative_translation(self) -> None:
+ def test_translate_se2_along_x_negative(self) -> None:
+ """Tests translating a SE2 state along the X-axis in the negative direction."""
pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
- translation: Vector2D = Vector2D(-0.5, -1.5)
- result: StateSE2 = translate_se2(pose, translation)
- expected: StateSE2 = StateSE2.from_array(np.array([0.5, 0.5, 0.0], dtype=np.float64))
+ distance: float = -0.5
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_with_rotation(self) -> None:
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 4], dtype=np.float64))
- translation: Vector2D = Vector2D(1.0, 0.0)
- result: StateSE2 = translate_se2(pose, translation)
- expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 4], dtype=np.float64))
+ def test_translate_se2_along_x_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the X-axis with 90 degree rotation."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_array(self) -> None:
- poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
- translation: Vector2D = Vector2D(1.0, 1.0)
- result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
- expected: npt.NDArray[np.float64] = np.array([[2.0, 3.0, 0.0], [1.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ def test_translate_se2_along_y(self) -> None:
+ """Tests translating a SE2 state along the Y-axis."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_array_zero_translation(self) -> None:
- poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
- translation: Vector2D = Vector2D(0.0, 0.0)
- result: npt.NDArray[np.float64] = translate_se2_array(poses, translation)
- expected: npt.NDArray[np.float64] = poses.copy()
- np.testing.assert_array_almost_equal(result, expected)
+ def test_translate_se2_along_y_negative(self) -> None:
+ """Tests translating a SE2 state along the Y-axis in the negative direction."""
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+ distance: float = -1.5
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_y_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the Y-axis with -90 degree rotation."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64))
+ distance: float = 2.0
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_along_yaw(self) -> None:
+ def test_translate_se2_along_body_frame_forward(self) -> None:
+ """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation."""
# Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.deg2rad(90)], dtype=np.float64))
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
vector: Vector2D = Vector2D(1.0, 0.0)
result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.deg2rad(90)], dtype=np.float64))
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_along_yaw_backward(self) -> None:
+ def test_translate_se2_along_body_frame_backward(self) -> None:
+ """Tests translating a SE2 state along the body frame backward direction."""
pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
vector: Vector2D = Vector2D(-1.0, 0.0)
result: StateSE2 = translate_se2_along_body_frame(pose, vector)
expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_along_yaw_diagonal(self) -> None:
+ def test_translate_se2_along_body_frame_diagonal(self) -> None:
+ """Tests translating a SE2 state along the body frame diagonal direction."""
pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
vector: Vector2D = Vector2D(1.0, 0.0)
result: StateSE2 = translate_se2_along_body_frame(pose, vector)
@@ -93,61 +108,127 @@ def test_translate_se2_along_yaw_diagonal(self) -> None:
)
np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
- def test_translate_se2_array_along_yaw(self) -> None:
+ def test_translate_se2_along_body_frame_lateral(self) -> None:
+ """Tests translating a SE2 state along the body frame lateral direction."""
+ # Move 1 unit to the left (positive y in body frame)
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ vector: Vector2D = Vector2D(0.0, 1.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation."""
+ # Move 1 unit to the left when facing 90 degrees
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ vector: Vector2D = Vector2D(0.0, 1.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_array_along_body_frame_single_distance(self) -> None:
+ """Tests translating a SE2 state array along the body frame forward direction."""
poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
- distance: float = Vector2D(1.0, 0.0)
+ distance: Vector2D = Vector2D(1.0, 0.0)
result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
- def test_translate_se2_array_along_yaw_multiple_distances(self) -> None:
+ def test_translate_se2_array_along_body_frame_multiple_distances(self) -> None:
+ """Tests translating a SE2 state array along the body frame forward direction with different distances."""
poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
- distance: float = Vector2D(2.0, 0.0)
+ distance: Vector2D = Vector2D(2.0, 0.0)
result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_translate_se2_array_along_body_frame_lateral(self) -> None:
+ """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation."""
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ distance: Vector2D = Vector2D(0.0, 1.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_absolute_to_relative_se2_array(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses."""
origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses)
expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation."""
reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_se2_array_identity(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_relative_to_absolute_se2_array(self) -> None:
+ """Tests converting relative SE2 poses to absolute SE2 poses."""
reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None:
+ """Tests converting relative SE2 poses to absolute SE2 poses with rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
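+ # The relative offset (1, 0) rotated by the reference yaw pi / 2 becomes (0, 1), giving the absolute pose (1, 1, pi / 2).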
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_absolute_to_relative_point_2d_array(self) -> None:
+ """Tests converting absolute 2D points to relative 2D points."""
reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None:
+ """Tests converting absolute 2D points to relative 2D points with 90 degree rotation."""
reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None:
+ """Tests converting an empty array of absolute 2D points to relative 2D points."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
def test_convert_relative_to_absolute_point_2d_array(self) -> None:
+ """Tests converting relative 2D points to absolute 2D points."""
reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None:
+ """Tests converting relative 2D points to absolute 2D points with 90 degree rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
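+ # Rotating by pi / 2 sends (1, 0) to (0, 1) and (0, 1) to (-1, 0); adding the reference position (1, 0) gives (1, 1) and (0, 0).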
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
class TestTransformSE3(unittest.TestCase):
@@ -157,6 +238,7 @@ def setUp(self):
self.num_consistency_tests = 10 # Number of random test cases for consistency checks
def test_translate_se3_along_x(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction."""
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 1.0
result: StateSE3 = translate_se3_along_x(pose, distance)
@@ -164,13 +246,23 @@ def test_translate_se3_along_x(self) -> None:
np.testing.assert_array_almost_equal(result.array, expected.array)
def test_translate_se3_along_x_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame backward direction."""
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = -0.5
result: StateSE3 = translate_se3_along_x(pose, distance)
expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
+ def test_translate_se3_along_x_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ distance: float = 2.5
+ result: StateSE3 = translate_se3_along_x(pose, distance)
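+ # With a yaw of pi / 2, the body x-axis aligns with world +y, so the pose moves 2.5 units along y.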
+ expected: StateSE3 = StateSE3.from_array(np.array([0.0, 2.5, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
def test_translate_se3_along_y(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction."""
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 1.0
result: StateSE3 = translate_se3_along_y(pose, distance)
@@ -178,13 +270,31 @@ def test_translate_se3_along_y(self) -> None:
np.testing.assert_array_almost_equal(result.array, expected.array)
def test_translate_se3_along_y_with_existing_position(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction with existing position."""
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 2.5
result: StateSE3 = translate_se3_along_y(pose, distance)
expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
+ def test_translate_se3_along_y_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction in the negative direction."""
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -1.0
+ result: StateSE3 = translate_se3_along_y(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction with roll rotation."""
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
+ distance: float = -1.0
+ result: StateSE3 = translate_se3_along_y(pose, distance)
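+ # With a roll of pi / 2, the body y-axis aligns with world +z, so translating by -1.0 lowers z from 3.0 to 2.0.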
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
def test_translate_se3_along_z(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction."""
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 1.0
result: StateSE3 = translate_se3_along_z(pose, distance)
@@ -192,34 +302,65 @@ def test_translate_se3_along_z(self) -> None:
np.testing.assert_array_almost_equal(result.array, expected.array)
def test_translate_se3_along_z_large_distance(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction with a large distance."""
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
distance: float = 10.0
result: StateSE3 = translate_se3_along_z(pose, distance)
expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
- def test_translate_body_frame(self) -> None:
+ def test_translate_se3_along_z_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction in the negative direction."""
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -2.0
+ result: StateSE3 = translate_se3_along_z(pose, distance)
+ expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction with pitch rotation."""
+ pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
+ distance: float = 2.0
+ result: StateSE3 = translate_se3_along_z(pose, distance)
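+ # With a pitch of pi / 2, the body z-axis aligns with world +x, so translating by 2.0 moves x from 1.0 to 3.0.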
+ expected: StateSE3 = StateSE3.from_array(np.array([3.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_body_frame(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction."""
pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
- def test_translate_body_frame_multiple_axes(self) -> None:
+ def test_translate_se3_along_body_frame_multiple_axes(self) -> None:
+ """Tests translating a SE3 state along the body frame in multiple axes."""
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
- def test_translate_body_frame_zero_translation(self) -> None:
+ def test_translate_se3_along_body_frame_zero_translation(self) -> None:
+ """Tests translating a SE3 state along the body frame with zero translation."""
pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
result: StateSE3 = translate_se3_along_body_frame(pose, translation)
expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
np.testing.assert_array_almost_equal(result.array, expected.array)
- def test_translate_body_frame_consistency(self) -> None:
+ def test_translate_se3_along_body_frame_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
+ # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis
+ pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+ result: StateSE3 = translate_se3_along_body_frame(pose, translation)
+ # Should move in +Y direction in world frame
+ expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se3_along_body_frame_consistency(self) -> None:
+ """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions."""
for _ in range(self.num_consistency_tests):
# Generate random parameters
@@ -280,6 +421,7 @@ def test_translate_body_frame_consistency(self) -> None:
)
def test_convert_absolute_to_relative_se3_array(self) -> None:
+ """Tests converting absolute SE3 poses to relative SE3 poses."""
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
absolute_poses: npt.NDArray[np.float64] = np.array(
[
@@ -299,13 +441,23 @@ def test_convert_absolute_to_relative_se3_array(self) -> None:
np.testing.assert_array_almost_equal(result, expected)
def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
+ """Tests converting a single absolute SE3 pose to a relative SE3 pose."""
reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
np.testing.assert_array_almost_equal(result, expected)
+ def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None:
+ """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw rotation."""
+ reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
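+ # Expressed in the reference frame (yaw = pi / 2), the world position (1, 0, 0) becomes (0, -1, 0) and the relative yaw is -pi / 2.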
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
def test_convert_relative_to_absolute_se3_array(self) -> None:
+ """Tests converting relative SE3 poses to absolute SE3 poses."""
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
relative_poses: npt.NDArray[np.float64] = np.array(
[
@@ -324,7 +476,16 @@ def test_convert_relative_to_absolute_se3_array(self) -> None:
)
np.testing.assert_array_almost_equal(result, expected)
+ def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None:
+ """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation."""
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
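+ # The relative offset (1, 0, 0) rotated by the reference yaw pi / 2 becomes (0, 1, 0), yielding the pose (1, 1, 0) with yaw pi / 2.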
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
def test_convert_absolute_to_relative_points_3d_array(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points."""
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
@@ -332,13 +493,23 @@ def test_convert_absolute_to_relative_points_3d_array(self) -> None:
np.testing.assert_array_almost_equal(result, expected)
def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points with origin reference."""
reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
np.testing.assert_array_almost_equal(result, expected)
+ def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation."""
+ reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
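+ # Rotating into the reference frame sends (1, 0, 0) to (0, -1, 0) and (0, 1, 1) to (1, 0, 1); z is unaffected by the yaw.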
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
def test_convert_relative_to_absolute_points_3d_array(self) -> None:
+ """Tests converting relative 3D points to absolute 3D points."""
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
@@ -346,14 +517,25 @@ def test_convert_relative_to_absolute_points_3d_array(self) -> None:
np.testing.assert_array_almost_equal(result, expected)
def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
+ """Tests converting an empty array of relative 3D points to absolute 3D points."""
reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
np.testing.assert_array_almost_equal(result, expected)
+ def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None:
+ """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation."""
+ reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
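+ # Rotating by pi / 2 sends (1, 0, 0) to (0, 1, 0) and (0, 1, 1) to (-1, 0, 1); adding the reference position (1, 0, 0) gives (1, 1, 0) and (0, 0, 1).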
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
class TestTransformConsistency(unittest.TestCase):
+ """Tests to ensure consistency between different transformation functions."""
+
def setUp(self):
self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
self.num_consistency_tests = 10 # Number of random test cases for consistency checks
@@ -434,6 +616,31 @@ def test_se2_points_consistency(self) -> None:
recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
)
+ def test_se2_translation_consistency(self) -> None:
+ """Test that SE2 translations are consistent between different methods"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random pose
+ pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random distances
+ dx = np.random.uniform(-10.0, 10.0)
+ dy = np.random.uniform(-10.0, 10.0)
+
+ # Test x-translation consistency
+ result_x_direct = translate_se2_along_x(pose, dx)
+ result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
+ np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
+
+ # Test y-translation consistency
+ result_y_direct = translate_se2_along_y(pose, dy)
+ result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
+ np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
+
+ # Test combined translation
+ result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
+ result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
+ np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
+
def test_se3_absolute_relative_conversion_consistency(self) -> None:
"""Test that converting absolute->relative->absolute returns original poses"""
for _ in range(self.num_consistency_tests):
@@ -569,28 +776,141 @@ def test_se2_se3_point_conversion_consistency(self) -> None:
relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
- # Check that SE2 and SE3 results are consistent (ignoring z-component)
+ # Check that SE2 and SE3 conversions are consistent for the x,y components
+ np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
np.testing.assert_array_almost_equal(
- relative_2d,
- relative_3d[..., Point3DIndex.XY],
- decimal=self.decimal,
+ absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
)
+
+ # Check that z-components remain zero
np.testing.assert_array_almost_equal(
- absolute_2d_recovered,
- absolute_3d_recovered[..., Point3DIndex.XY],
- decimal=self.decimal,
+ relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
)
- # Z-component should remain zero
np.testing.assert_array_almost_equal(
- relative_3d[..., Point3DIndex.Z],
- np.zeros(num_points),
+ absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+ )
+
+ def test_se2_array_translation_consistency(self) -> None:
+ """Test that SE2 array translation is consistent with single pose translation"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ poses_array = self._get_random_se2_array(num_poses)
+
+ # Generate random translation
+ dx = np.random.uniform(-5.0, 5.0)
+ dy = np.random.uniform(-5.0, 5.0)
+ translation = Vector2D(dx, dy)
+
+ # Translate using array function
+ result_array = translate_se2_array_along_body_frame(poses_array, translation)
+
+ # Translate each pose individually
+ result_individual = np.zeros_like(poses_array)
+ for i in range(num_poses):
+ pose = StateSE2.from_array(poses_array[i])
+ translated = translate_se2_along_body_frame(pose, translation)
+ result_individual[i] = translated.array
+
+ np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
+
+ def test_transform_empty_arrays(self) -> None:
+ """Test that transform functions handle empty arrays correctly"""
+ reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
+ reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
+
+ # Test SE2 empty arrays
+ empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
+ empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
+
+ result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
+ result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
+
+ self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
+ self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
+
+ # Test SE3 empty arrays
+ empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
+ empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
+
+ result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
+ result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
+
+ self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
+ self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
+
+ def test_transform_identity_operations(self) -> None:
+ """Test that transforms with identity reference frames work correctly"""
+ # Identity SE2 pose
+ identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ identity_se3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+
+ for _ in range(self.num_consistency_tests):
+ # Test SE2 identity transforms
+ num_poses = np.random.randint(1, 10)
+ se2_poses = self._get_random_se2_array(num_poses)
+ se2_points = se2_poses[:, StateSE2Index.XY]
+
+ relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
+ relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
+
+ np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
+
+ # Test SE3 identity transforms
+ se3_poses = self._get_random_se3_array(num_poses)
+ se3_points = se3_poses[:, StateSE3Index.XYZ]
+
+ relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses)
+ relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
+
+ np.testing.assert_array_almost_equal(se3_poses, relative_se3_poses, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
+
+ def test_transform_large_rotations(self) -> None:
+ """Test transforms with large rotation angles beyond [-π, π]"""
+ for _ in range(self.num_consistency_tests):
+ # Create poses with large rotation angles
+ large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi)
+ large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3)
+
+ reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64))
+ reference_se3 = StateSE3.from_array(
+ np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64)
+ )
+
+ # Generate test poses/points
+ test_se2_poses = self._get_random_se2_array(5)
+ test_se3_poses = self._get_random_se3_array(5)
+ test_2d_points = test_se2_poses[:, StateSE2Index.XY]
+ test_3d_points = test_se3_poses[:, StateSE3Index.XYZ]
+
+ # Round-trip conversions should still work
+ relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses)
+ recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+ relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, test_se3_poses)
+ recovered_se3 = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
+
+ relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points)
+ recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points)
+
+ relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points)
+ recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points)
+
+ # Check consistency (allowing for angle wrapping)
+ np.testing.assert_array_almost_equal(
+ test_se2_poses[:, StateSE2Index.XY],
+ recovered_se2[:, StateSE2Index.XY],
decimal=self.decimal,
)
np.testing.assert_array_almost_equal(
- absolute_3d_recovered[..., Point3DIndex.Z],
- np.zeros(num_points),
+ test_se3_poses[:, StateSE3Index.XYZ],
+ recovered_se3[:, StateSE3Index.XYZ],
decimal=self.decimal,
)
+ np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal)
if __name__ == "__main__":
From ac7fc40c90b2661bd2c72b14fa9cc4c5e6d588cb Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Thu, 28 Aug 2025 11:02:30 +0800
Subject: [PATCH 029/145] stop tracking jbwang_test script
---
.../dataset_specific/kitti_360/jbwang_test.py | 155 -----------
.../kitti_360/kitti_360_data_converter.py | 9 +-
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.31.57/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti360_test/2025.08.15.14.31.57/log.txt | 10 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.36.40/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti360_test/2025.08.15.14.36.40/log.txt | 10 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.40.29/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti_test2/2025.08.15.14.40.29/log.txt | 10 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.43.13/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti_test2/2025.08.15.14.43.13/log.txt | 12 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.46.49/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti_test2/2025.08.15.14.46.49/log.txt | 10 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.50.55/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti_test2/2025.08.15.14.50.55/log.txt | 11 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.15.14.52.39/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/kitti_test2/2025.08.15.14.52.39/log.txt | 11 -
.../code/hydra/config.yaml | 60 -----
.../2025.08.11.15.45.36/code/hydra/hydra.yaml | 177 ------------
.../code/hydra/overrides.yaml | 1 -
exp/my_run/2025.08.11.15.45.36/log.txt | 10 -
jbwang_test.py | 98 -------
jbwang_test2.py | 229 ----------------
notebooks/dataset/jbwang_test.py | 94 -------
notebooks/gym/jbwang_test.py | 180 -------------
notebooks/jbwang_viz_test.py | 252 ------------------
39 files changed, 5 insertions(+), 3000 deletions(-)
delete mode 100644 d123/dataset/dataset_specific/kitti_360/jbwang_test.py
delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.31.57/log.txt
delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
delete mode 100644 exp/kitti360_test/2025.08.15.14.36.40/log.txt
delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.40.29/log.txt
delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.43.13/log.txt
delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.46.49/log.txt
delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.50.55/log.txt
delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
delete mode 100644 exp/kitti_test2/2025.08.15.14.52.39/log.txt
delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
delete mode 100644 exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
delete mode 100644 exp/my_run/2025.08.11.15.45.36/log.txt
delete mode 100644 jbwang_test.py
delete mode 100644 jbwang_test2.py
delete mode 100644 notebooks/dataset/jbwang_test.py
delete mode 100644 notebooks/gym/jbwang_test.py
delete mode 100644 notebooks/jbwang_viz_test.py
diff --git a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py b/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
deleted file mode 100644
index e480783e..00000000
--- a/d123/dataset/dataset_specific/kitti_360/jbwang_test.py
+++ /dev/null
@@ -1,155 +0,0 @@
-import gc
-import json
-import os
-import pickle
-from dataclasses import asdict
-from functools import partial
-from pathlib import Path
-from typing import Any, Dict, Final, List, Optional, Tuple, Union
-
-import numpy as np
-import pyarrow as pa
-import yaml
-from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens
-from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
-from nuplan.database.nuplan_db_orm.lidar_box import LidarBox
-from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
-from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-from nuplan.planning.simulation.observation.observation_type import CameraChannel
-from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
-from pyquaternion import Quaternion
-from sqlalchemy import func
-
-
-from kitti_360_data_converter import _extract_ego_state_all,get_kitti360_lidar_metadata,_extract_cameras,_extract_detections,_read_timestamps
-
-# a = _extract_ego_state_all("2013_05_28_drive_0000_sync")
-# print(a[0])
-# print(a[1])
-# print(a[10])
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-
-NUPLAN_CAMERA_TYPES = {
- CameraType.CAM_F0: CameraChannel.CAM_F0,
- CameraType.CAM_B0: CameraChannel.CAM_B0,
- CameraType.CAM_L0: CameraChannel.CAM_L0,
- CameraType.CAM_L1: CameraChannel.CAM_L1,
- CameraType.CAM_L2: CameraChannel.CAM_L2,
- CameraType.CAM_R0: CameraChannel.CAM_R0,
- CameraType.CAM_R1: CameraChannel.CAM_R1,
- CameraType.CAM_R2: CameraChannel.CAM_R2,
-}
-
-NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"])
-NUPLAN_ROLLING_SHUTTER_S: Final[TimePoint] = TimePoint.from_s(1 / 60)
-
-def _extract_camera(
- log_db: NuPlanDB,
- lidar_pc: LidarPc,
- source_log_path: Path,
-) -> Dict[CameraType, Union[str, bytes]]:
-
- camera_dict: Dict[str, Union[str, bytes]] = {}
- sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
-
- log_cam_infos = {camera.token: camera for camera in log_db.log.cameras}
- for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items():
- camera_data: Optional[Union[str, bytes]] = None
- c2e: Optional[List[float]] = None
- image_class = list(get_images_from_lidar_tokens(source_log_path, [lidar_pc.token], [str(camera_channel.value)]))
- # print("image_class",image_class)
- if len(image_class) != 0:
- image = image_class[0]
- filename_jpg = sensor_root / image.filename_jpg
-
- timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us
- img_ego_pose: EgoPose = (
- log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
- )
- img_e2g = img_ego_pose.trans_matrix
- g2e = lidar_pc.ego_pose.trans_matrix_inv
- img_e2e = g2e @ img_e2g
- cam_info = log_cam_infos[image.camera_token]
- c2img_e = cam_info.trans_matrix
- c2e = img_e2e @ c2img_e
- # print(f"Camera {camera_type} found for lidar {lidar_pc.token} at timestamp {timestamp}")
- print(camera_type,"c2e:", c2e)
- camera_dict[camera_type] = camera_data
-
- return camera_dict
-
-
-def get_cam_info_from_lidar_pc(log,log_file, lidar_pc, rolling_shutter_s=1/60):
-
- retrieved_images = get_images_from_lidar_tokens(
- log_file, [lidar_pc.token], [str(channel.value) for channel in CameraChannel]
- )
-
- # if interp_trans:
- # neighbours = []
- # ego_poses_dict = {}
- # for ego_pose in log.ego_poses:
- # ego_poses_dict[ego_pose.token] = ego_pose
- # if abs(ego_pose.timestamp - lidar_pc.ego_pose.timestamp) / 1e6 < 0.5:
- # neighbours.append(ego_pose)
- # timestamps = [pose.timestamp for pose in neighbours]
- # translations = [pose.translation_np for pose in neighbours]
- # splines = [CubicSpline(timestamps, [translation[i] for translation in translations]) for i in range(2)]
-
- log_cam_infos = {camera.token : camera for camera in log.camera}
- cams = {}
- for img in retrieved_images:
- channel = img.channel
- filename = img.filename_jpg
-
- # if interp_trans:
- # img_ego_pose = ego_poses_dict[img.ego_pose_token]
- # interpolated_translation = np.array([splines[0](timestamp), splines[1](timestamp), img_ego_pose.z])
- # delta = interpolated_translation - lidar_pc.ego_pose.translation_np
- # delta = np.dot(lidar_pc.ego_pose.quaternion.rotation_matrix.T, delta)
- if channel == "CAM_F0":
- timestamp = img.timestamp + (rolling_shutter_s * 1e6)
- img_ego_pose = log.session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
- img_e2g = img_ego_pose.trans_matrix
- # print("img_e2g:", img_e2g)
-
- g2e = lidar_pc.ego_pose.trans_matrix_inv
- # print("g2e:", g2e) #change obviously
- img_e2e = g2e @ img_e2g
- # print("img_e2e:", img_e2e)
- cam_info = log_cam_infos[img.camera_token]
- c2img_e = cam_info.trans_matrix
- # print("c2img_e:", c2img_e)
- c2e = img_e2e @ c2img_e
- # print("channel:", channel, "c2e:", c2e)
-
- cams[channel] = dict(
- data_path = filename,
- timestamp = img.timestamp,
- token=img.token,
- sensor2ego_rotation = Quaternion(matrix=c2e[:3, :3]),
- sensor2ego_translation = c2e[:3, 3],
- cam_intrinsic = cam_info.intrinsic_np,
- distortion = cam_info.distortion_np,
- )
-
-
- if len(cams) != 8:
- return None
- # print(cams)
- return cams
-
-if __name__ == "__main__":
- # Example usage
- # data_converter_config: DataConverterConfig
- # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.10.11.07.12.18_veh-50_00211_00304.db")
- # log_path = Path("/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.09.16.15.12.03_veh-42_01037_01434.db")
- # log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
-
- # for lidar_pc in log_db.lidar_pc: # Replace with actual token
- # # camera_data = _extract_camera(log_db, lidar_pc, log_path)
- # camera_data = get_cam_info_from_lidar_pc(log_db,log_path, lidar_pc, rolling_shutter_s=1/60)
- # print(_extract_cameras("2013_05_28_drive_0000_sync",0))
- # _extract_detections("2013_05_28_drive_0000_sync", 0)
- print(_read_timestamps("2013_05_28_drive_0000_sync"))
\ No newline at end of file
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 03e5bd37..2cc40675 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -197,7 +197,6 @@ def convert_kitti360_log_to_arrow(
vehicle_parameters = get_kitti360_station_wagon_parameters()
camera_metadata = get_kitti360_camera_metadata()
- #TODO now only velodyne lidar
lidar_metadata = get_kitti360_lidar_metadata()
schema_column_list = [
@@ -406,14 +405,17 @@ def _write_recording_table(
recording_table = recording_table.sort_by([("timestamp", "ascending")])
write_arrow_table(recording_table, log_file_path)
-#TODO Synchronization all other sequences)
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
# unix
ts_files = [
+ PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
PATH_2D_RAW_ROOT / log_name / "image_01" / "timestamps.txt",
- PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
]
+
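+ # NOTE: sequence 2013_05_28_drive_0002_sync has no lidar data before frame 4391, so fall back to the image timestamp files.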
+ if log_name == "2013_05_28_drive_0002_sync":
+ ts_files = ts_files[1:]
+
for ts_file in ts_files:
if ts_file.exists():
tps: List[TimePoint] = []
@@ -602,7 +604,6 @@ def _extract_detections(
return detections_states, detections_velocity, detections_tokens, detections_types
-#TODO lidar extraction now only velo
def _extract_lidar(log_name: str, idx: int, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
#NOTE special case for sequence 2013_05_28_drive_0002_sync which has no lidar data before frame 4391
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
deleted file mode 100644
index a505c4d2..00000000
--- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti360_test
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- nuplan_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
- _convert_: all
- splits:
- - kitti360
- log_path: ${oc.env:KITTI360_DATA_ROOT}
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
deleted file mode 100644
index 406ccbe7..00000000
--- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti360_test
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti360_test
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123/d123/script
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.31.57
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
deleted file mode 100644
index 6c8e6217..00000000
--- a/exp/kitti360_test/2025.08.15.14.31.57/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti360_test
diff --git a/exp/kitti360_test/2025.08.15.14.31.57/log.txt b/exp/kitti360_test/2025.08.15.14.31.57/log.txt
deleted file mode 100644
index 984f705a..00000000
--- a/exp/kitti360_test/2025.08.15.14.31.57/log.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-2025-08-15 14:31:57,385 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:32:14,105 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:32:35,603 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:32:35,604 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:32:35,604 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:32:35,605 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
deleted file mode 100644
index 0fd6120d..00000000
--- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti360_test
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- kitti360_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
- _convert_: all
- splits:
- - kitti360
- log_path: ${oc.env:KITTI360_DATA_ROOT}
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
deleted file mode 100644
index 4eee2c65..00000000
--- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti360_test
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti360_test
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123/d123/script
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti360_test/2025.08.15.14.36.40
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml b/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
deleted file mode 100644
index 6c8e6217..00000000
--- a/exp/kitti360_test/2025.08.15.14.36.40/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti360_test
diff --git a/exp/kitti360_test/2025.08.15.14.36.40/log.txt b/exp/kitti360_test/2025.08.15.14.36.40/log.txt
deleted file mode 100644
index 5f939dac..00000000
--- a/exp/kitti360_test/2025.08.15.14.36.40/log.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-2025-08-15 14:36:40,989 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:36:56,167 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:37:18,685 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:37:18,686 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:37:18,686 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:37:18,687 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
deleted file mode 100644
index 5ce47ba9..00000000
--- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti_test2
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- kitti360_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
- _convert_: all
- splits:
- - kitti360
- log_path: ${oc.env:KITTI360_DATA_ROOT}
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
deleted file mode 100644
index 2d1c615a..00000000
--- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti_test2
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti_test2
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.40.29
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
deleted file mode 100644
index 676c1042..00000000
--- a/exp/kitti_test2/2025.08.15.14.40.29/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.40.29/log.txt b/exp/kitti_test2/2025.08.15.14.40.29/log.txt
deleted file mode 100644
index 8437d38e..00000000
--- a/exp/kitti_test2/2025.08.15.14.40.29/log.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-2025-08-15 14:40:29,427 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:40:42,538 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:41:00,324 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:41:00,325 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:41:00,325 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:41:00,326 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
deleted file mode 100644
index de70bfa3..00000000
--- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti_test2
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- nuplan_private_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
- _convert_: all
- splits:
- - nuplan_private_test
- log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
deleted file mode 100644
index cca44d29..00000000
--- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti_test2
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti_test2
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.43.13
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
deleted file mode 100644
index 676c1042..00000000
--- a/exp/kitti_test2/2025.08.15.14.43.13/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.43.13/log.txt b/exp/kitti_test2/2025.08.15.14.43.13/log.txt
deleted file mode 100644
index fec50568..00000000
--- a/exp/kitti_test2/2025.08.15.14.43.13/log.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-2025-08-15 14:43:13,965 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:43:24,401 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:43:39,643 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:43:39,644 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:43:39,644 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:43:39,645 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
-2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:21} Building RawDataProcessor...DONE!
-2025-08-15 14:43:44,316 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:34} Processing dataset: NuplanDataConverter
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
deleted file mode 100644
index 5ce47ba9..00000000
--- a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti_test2
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- kitti360_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
- _convert_: all
- splits:
- - kitti360
- log_path: ${oc.env:KITTI360_DATA_ROOT}
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
deleted file mode 100644
index bd9698a2..00000000
--- a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti_test2
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti_test2
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.46.49
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
deleted file mode 100644
index 676c1042..00000000
--- a/exp/kitti_test2/2025.08.15.14.46.49/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.46.49/log.txt b/exp/kitti_test2/2025.08.15.14.46.49/log.txt
deleted file mode 100644
index 00286f48..00000000
--- a/exp/kitti_test2/2025.08.15.14.46.49/log.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-2025-08-15 14:46:49,566 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:46:59,509 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:47:14,118 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:47:14,119 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:47:14,122 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
deleted file mode 100644
index 5ce47ba9..00000000
--- a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti_test2
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- kitti360_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
- _convert_: all
- splits:
- - kitti360
- log_path: ${oc.env:KITTI360_DATA_ROOT}
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
deleted file mode 100644
index acff45d7..00000000
--- a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti_test2
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti_test2
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.50.55
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
deleted file mode 100644
index 676c1042..00000000
--- a/exp/kitti_test2/2025.08.15.14.50.55/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.50.55/log.txt b/exp/kitti_test2/2025.08.15.14.50.55/log.txt
deleted file mode 100644
index 9902e0ce..00000000
--- a/exp/kitti_test2/2025.08.15.14.50.55/log.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-2025-08-15 14:50:55,950 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:51:19,466 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:51:52,653 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:51:52,654 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
-2025-08-15 14:51:52,655 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter', '_convert_': 'all', 'splits': ['kitti360'], 'log_path': '${oc.env:KITTI360_DATA_ROOT}', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}}
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
deleted file mode 100644
index de70bfa3..00000000
--- a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: kitti_test2
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: true
-force_map_conversion: false
-datasets:
- nuplan_private_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
- _convert_: all
- splits:
- - nuplan_private_test
- log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
deleted file mode 100644
index d053f8e7..00000000
--- a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=kitti_test2
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=kitti_test2
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/kitti_test2/2025.08.15.14.52.39
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml b/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
deleted file mode 100644
index 676c1042..00000000
--- a/exp/kitti_test2/2025.08.15.14.52.39/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=kitti_test2
diff --git a/exp/kitti_test2/2025.08.15.14.52.39/log.txt b/exp/kitti_test2/2025.08.15.14.52.39/log.txt
deleted file mode 100644
index e2585299..00000000
--- a/exp/kitti_test2/2025.08.15.14.52.39/log.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-2025-08-15 14:52:39,717 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-15 14:53:02,994 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-15 14:53:36,548 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-15 14:53:36,549 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-15 14:53:36,549 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
-2025-08-15 14:53:36,550 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:17} Instantiating dataset type: {'_target_': 'd123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter', '_convert_': 'all', 'splits': ['nuplan_private_test'], 'log_path': '${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits', 'data_converter_config': {'_target_': 'd123.dataset.dataset_specific.raw_data_converter.DataConverterConfig', '_convert_': 'all', 'output_path': '${d123_data_root}', 'force_log_conversion': '${force_log_conversion}', 'force_map_conversion': '${force_map_conversion}', 'camera_store_option': 'path', 'lidar_store_option': 'path'}}
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
deleted file mode 100644
index 86d05e7b..00000000
--- a/exp/my_run/2025.08.11.15.45.36/code/hydra/config.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-worker:
- _target_: nuplan.planning.utils.multithreading.worker_ray.RayDistributed
- _convert_: all
- master_node_ip: null
- threads_per_node: null
- debug_mode: false
- log_to_driver: true
- logs_subdir: logs
- use_distributed: false
-scene_filter:
- _target_: d123.dataset.scene.scene_filter.SceneFilter
- _convert_: all
- split_types: null
- split_names: null
- log_names: null
- map_names: null
- scene_tokens: null
- timestamp_threshold_s: null
- ego_displacement_minimum_m: null
- duration_s: 9.2
- history_s: 3.0
-scene_builder:
- _target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
- _convert_: all
- dataset_path: ${d123_data_root}
-distributed_timeout_seconds: 7200
-selected_simulation_metrics: null
-verbose: false
-logger_level: info
-logger_format_string: null
-max_number_of_workers: null
-gpu: true
-seed: 42
-d123_devkit_root: ${oc.env:D123_DEVKIT_ROOT}
-d123_maps_root: ${oc.env:D123_MAPS_ROOT}
-d123_data_root: ${oc.env:D123_DATA_ROOT}
-nuplan_devkit_root: ${oc.env:NUPLAN_DEVKIT_ROOT}
-nuplan_maps_root: ${oc.env:NUPLAN_MAPS_ROOT}
-nuplan_data_root: ${oc.env:NUPLAN_DATA_ROOT}
-experiment_name: my_run
-date_format: '%Y.%m.%d.%H.%M.%S'
-experiment_uid: ${now:${date_format}}
-output_dir: ${oc.env:D123_EXP_ROOT}/${experiment_name}/${experiment_uid}
-force_log_conversion: false
-force_map_conversion: true
-datasets:
- nuplan_private_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
- _convert_: all
- splits:
- - nuplan_private_test
- log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits
- data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
- _convert_: all
- output_path: ${d123_data_root}
- force_log_conversion: ${force_log_conversion}
- force_map_conversion: ${force_map_conversion}
- camera_store_option: path
- lidar_store_option: path
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
deleted file mode 100644
index bf09b447..00000000
--- a/exp/my_run/2025.08.11.15.45.36/code/hydra/hydra.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-hydra:
- run:
- dir: ${output_dir}
- sweep:
- dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
- subdir: ${hydra.job.num}
- launcher:
- _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
- sweeper:
- _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
- max_batch_size: null
- params: null
- help:
- app_name: ${hydra.job.name}
- header: '${hydra.help.app_name} is powered by Hydra.
-
- '
- footer: 'Powered by Hydra (https://hydra.cc)
-
- Use --hydra-help to view Hydra specific help
-
- '
- template: '${hydra.help.header}
-
- == Configuration groups ==
-
- Compose your configuration from those groups (group=option)
-
-
- $APP_CONFIG_GROUPS
-
-
- == Config ==
-
- Override anything in the config (foo.bar=value)
-
-
- $CONFIG
-
-
- ${hydra.help.footer}
-
- '
- hydra_help:
- template: 'Hydra (${hydra.runtime.version})
-
- See https://hydra.cc for more info.
-
-
- == Flags ==
-
- $FLAGS_HELP
-
-
- == Configuration groups ==
-
- Compose your configuration from those groups (For example, append hydra/job_logging=disabled
- to command line)
-
-
- $HYDRA_CONFIG_GROUPS
-
-
- Use ''--cfg hydra'' to Show the Hydra config.
-
- '
- hydra_help: ???
- hydra_logging:
- version: 1
- formatters:
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- root:
- level: INFO
- handlers:
- - console
- disable_existing_loggers: false
- job_logging:
- version: 1
- formatters:
- simple:
- format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
- colorlog:
- (): colorlog.ColoredFormatter
- format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
- - %(message)s'
- log_colors:
- DEBUG: purple
- INFO: green
- WARNING: yellow
- ERROR: red
- CRITICAL: red
- handlers:
- console:
- class: logging.StreamHandler
- formatter: colorlog
- stream: ext://sys.stdout
- file:
- class: logging.FileHandler
- formatter: simple
- filename: ${hydra.job.name}.log
- root:
- level: INFO
- handlers:
- - console
- - file
- disable_existing_loggers: false
- env: {}
- mode: RUN
- searchpath:
- - pkg://d123.script.config
- - pkg://d123.script.config.common
- callbacks: {}
- output_subdir: ${output_dir}/code/hydra
- overrides:
- hydra:
- - hydra.mode=RUN
- task:
- - experiment_name=my_run
- job:
- name: run_dataset_conversion
- chdir: false
- override_dirname: experiment_name=my_run
- id: ???
- num: ???
- config_name: default_dataset_conversion
- env_set: {}
- env_copy: []
- config:
- override_dirname:
- kv_sep: '='
- item_sep: ','
- exclude_keys: []
- runtime:
- version: 1.3.2
- version_base: '1.3'
- cwd: /home/jbwang/d123/d123/script
- config_sources:
- - path: hydra.conf
- schema: pkg
- provider: hydra
- - path: /home/jbwang/d123/d123/script/config/dataset_conversion
- schema: file
- provider: main
- - path: hydra_plugins.hydra_colorlog.conf
- schema: pkg
- provider: hydra-colorlog
- - path: d123.script.config
- schema: pkg
- provider: hydra.searchpath in main
- - path: d123.script.config.common
- schema: pkg
- provider: hydra.searchpath in main
- - path: ''
- schema: structured
- provider: schema
- output_dir: /home/jbwang/d123/exp/my_run/2025.08.11.15.45.36
- choices:
- scene_builder: default_scene_builder
- scene_filter: all_scenes
- worker: ray_distributed
- hydra/env: default
- hydra/callbacks: null
- hydra/job_logging: colorlog
- hydra/hydra_logging: colorlog
- hydra/hydra_help: default
- hydra/help: default
- hydra/sweeper: basic
- hydra/launcher: basic
- hydra/output: default
- verbose: false
diff --git a/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml b/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
deleted file mode 100644
index 373bde0c..00000000
--- a/exp/my_run/2025.08.11.15.45.36/code/hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-- experiment_name=my_run
diff --git a/exp/my_run/2025.08.11.15.45.36/log.txt b/exp/my_run/2025.08.11.15.45.36/log.txt
deleted file mode 100644
index 2bdc0b60..00000000
--- a/exp/my_run/2025.08.11.15.45.36/log.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-2025-08-11 15:45:36,813 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:19} Building WorkerPool...
-2025-08-11 15:46:10,300 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_ray.py:78} Starting ray local!
-2025-08-11 15:46:34,960 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:101} Worker: RayDistributed
-2025-08-11 15:46:34,962 INFO {/data/jbwang/submodule/nuplan-devkit-master/nuplan/planning/utils/multithreading/worker_pool.py:102} Number of nodes: 1
-Number of CPUs per node: 64
-Number of GPUs per node: 8
-Number of threads across all nodes: 64
-2025-08-11 15:46:34,962 INFO {/home/jbwang/d123/d123/script/builders/worker_pool_builder.py:27} Building WorkerPool...DONE!
-2025-08-11 15:46:34,963 INFO {/home/jbwang/d123/d123/script/run_dataset_conversion.py:30} Starting Dataset Caching...
-2025-08-11 15:46:34,964 INFO {/home/jbwang/d123/d123/script/builders/data_converter_builder.py:14} Building RawDataProcessor...
diff --git a/jbwang_test.py b/jbwang_test.py
deleted file mode 100644
index e42f512a..00000000
--- a/jbwang_test.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-
-# # # Open the database file
-# # db = NuPlanDB(db_path="/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db")
-# NUPLAN_DATA_ROOT = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini"
-# log_path
-# log_db = NuPlanDB(NUPLAN_DATA_ROOT, str(log_path), None)
-
-# # Get the data for frame 1050
-# frame = db.get_frame(1050)
-# img_front = frame.camera_front # front-view image
-# point_cloud = frame.lidar # point cloud
-
-# # Get all vehicle status records for this clip
-# status_data = db.get_vehicle_status() # returns a DataFrame
-# print(status_data)
-
-
-
-# from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter, DataConverterConfig
-# splits = ["nuplan_mini_train"]
-# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/"
-# converter = NuplanDataConverter(
-#     log_path=log_path,
-#     splits=splits,
-# data_converter_config=DataConverterConfig(output_path="data/jbwang/d123"),
-# )
-# # converter.convert_logs()
-from pathlib import Path
-log_paths_per_split = {
- "nuplan_mini_train": [
- "2021","2022"]
- }
-log_args = [
- {
- "log_path": log_path,
- "split": split,
- }
- for split, log_paths in log_paths_per_split.items()
- for log_path in log_paths
- ]
-PATH_2D_RAW_ROOT = Path("/nas/datasets/KITTI-360/data_3d_raw/")
-candidates = sorted(p for p in PATH_2D_RAW_ROOT.iterdir() if p.is_dir() and p.name.endswith("_sync"))
-# print(log_args)
-# print(candidates)
-# print(candidates[0].name)
-# print(candidates[0].stem)
-# print(type(candidates[0].name))
-# print(type(candidates[0].stem))
-# PATH_2D_RAW_ROOT_new = PATH_2D_RAW_ROOT/"123"/candidates[0].name
-# print(PATH_2D_RAW_ROOT_new)
-
-
-
-# import hashlib
-# def create_token(input_data: str) -> str:
-# # TODO: Refactor this function.
-# # TODO: Add a general function to create tokens from arbitrary data.
-# if isinstance(input_data, str):
-# input_data = input_data.encode("utf-8")
-
-# hash_obj = hashlib.sha256(input_data)
-# return hash_obj.hexdigest()[:16]
-
-# log_name = "1230_asd_"
-# for i in range(20):
-# a = create_token(f"{log_name}_{i}")
-#     print(a)
-
-
-# import numpy as np
-# from pathlib import Path
-# a = np.loadtxt("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/0000000000.txt")
-# b = np.loadtxt("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0018_sync/poses.txt")
-# data = b
-# ts = data[:, 0].astype(np.int32)
-# poses = np.reshape(data[:, 1:], (-1, 3, 4))
-# poses = np.concatenate((poses, np.tile(np.array([0, 0, 0, 1]).reshape(1,1,4),(poses.shape[0],1,1))), 1)
-# print(a)
-# print(b.shape)
-# print(ts.shape)
-# print(poses.shape)
-
-# ccc = Path("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/")
-# print(len(list(ccc.glob("*.txt"))))
-
-
-
-
-from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import convert_nuplan_map_to_gpkg
-
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig
-
-MAP_LOCATIONS = {"sg-one-north", "us-ma-boston", "us-nv-las-vegas-strip", "us-pa-pittsburgh-hazelwood"}
-maps = list(MAP_LOCATIONS)
-
-data_converter_config = DataConverterConfig(output_path="/nas/datasets/nuplan/maps")
-convert_nuplan_map_to_gpkg(maps, data_converter_config)
\ No newline at end of file
diff --git a/jbwang_test2.py b/jbwang_test2.py
deleted file mode 100644
index 183df813..00000000
--- a/jbwang_test2.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# # import numpy as np
-# # import pickle
-
-# # # path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
-# # # a = np.fromfile(path, dtype=np.float32)
-
-# # # print(a.shape)
-# # # print(a[:10])
-
-# # # path2 = "/nas/datasets/KITTI-360/calibration/calib_cam_to_pose.txt"
-# # # c = np.loadtxt(path2)
-# # # print(c)
-
-# # import open3d as o3d
-# # import numpy as np
-
-# # def read_ply_file(file_path):
-# #     # Read the PLY file
-# # pcd = o3d.io.read_point_cloud(file_path)
-# # print(len(pcd.points), len(pcd.colors))
-# #     # Extract vertex information
-# # points = np.asarray(pcd.points) # x, y, z
-# # colors = np.asarray(pcd.colors) # red, green, blue
-# # # semantics = np.asarray(pcd.semantic) # semanticID, instanceID, isVisible, confidence
-
-# #     # Merge all fields into a single array
-# # vertices = np.hstack((points, colors))
-
-# # return vertices
-
-# # # Example usage
-# # file_path = '/nas/datasets/KITTI-360/data_3d_semantics/train/2013_05_28_drive_0000_sync/static/0000000002_0000000385.ply' # replace with the path to your PLY file
-# # vertices = read_ply_file(file_path)
-
-# # # Print the first few vertices
-# # print("Vertex info (first 5 vertices):")
-# # print(vertices[:5])
-
-# import numpy as np
-# from scipy.linalg import polar
-# from scipy.spatial.transform import Rotation as R
-
-# def polar_decompose_rotation_scale(A: np.ndarray):
-# """
-#     A: 3x3 (rotation + scale + shear)
-#     Returns:
-#       Rm: pure rotation
-#       Sm: symmetric positive definite (scale + shear)
-#       scale: approximate per-axis scale (square roots of Sm's eigenvalues, or its diagonal; interpret with care if shear is present)
-#       yaw,pitch,roll: ZYX sequence (typically yaw(Z), pitch(Y), roll(X))
-#     """
-#     Rm, Sm = polar(A) # A = Rm @ Sm
-#     # Approximate per-axis scale (assuming no shear):
-#     scale = np.diag(Sm)
-#     # Euler angles
-# yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
-# return {
-# "R": Rm,
-# "S": Sm,
-# "scale_diag": scale,
-# "yaw_pitch_roll": (yaw, pitch, roll),
-# }
-
-# M = np.array([
-# [-3.97771668e+00, -1.05715942e+00,-2.18206085e-02],
-# [2.43555284e+00, -1.72707462e+00, -1.03932284e-02],
-# [-4.41359095e-02, -2.94448305e-02, 1.39303744e+00],
-# ])
-# out = polar_decompose_rotation_scale(M)
-# print(out)
-
-# import numpy as np
-# path = "/nas/datasets/KITTI-360/data_3d_raw/2013_05_28_drive_0000_sync/velodyne_points/data/0000000000.bin"
-# a = np.fromfile(path, dtype=np.float32)
-# a = a.reshape((-1,4))
-# print(a[10000:10010,:3])
-
-
-
-
-
-# import gc
-# import json
-# import os
-# from dataclasses import asdict
-# from functools import partial
-# from pathlib import Path
-# from typing import Any, Dict, Final, List, Optional, Tuple, Union
-
-# import numpy as np
-# from collections import defaultdict
-# import datetime
-# import hashlib
-# import xml.etree.ElementTree as ET
-# import pyarrow as pa
-# from PIL import Image
-# import logging
-
-# from d123.common.datatypes.detection.detection_types import DetectionType
-# from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D
-
-
-# #TODO train and train_full
-# bbox_3d_path = Path("/nas/datasets/KITTI-360/data_3d_bboxes/train/2013_05_28_drive_0000_sync.xml")
-
-# tree = ET.parse(bbox_3d_path)
-# root = tree.getroot()
-
-# KITTI360_DETECTION_NAME_DICT = {
-# "truck": DetectionType.VEHICLE,
-# "bus": DetectionType.VEHICLE,
-# "car": DetectionType.VEHICLE,
-# "motorcycle": DetectionType.BICYCLE,
-# "bicycle": DetectionType.BICYCLE,
-# "pedestrian": DetectionType.PEDESTRIAN,
-# }
-# # x,y,z = 881.2268115,3247.493293,115.239219
-# # x,y,z = 867.715474,3229.630439,115.189221 # ego vehicle
-# # x,y,z = 873.533508, 3227.16235, 115.185341 # the pedestrian we are looking for
-# x,y,z = 874.233508, 3231.56235, 115.185341 # the vehicle we are looking for
-# CENTER_REF = np.array([x, y, z], dtype=np.float64)
-# objs_name = []
-# label_name = []
-# for child in root:
-# label = child.find('label').text
-#     # if child.find('transform') is None or label not in KITTI360_DETECTION_NAME_DICT.keys():
-# # continue
-
-# if child.find('transform') is None:
-# continue
-# print("this label is ",label)
-# print("!!!!!!!!!!!!!!!!!!!")
-# obj = KITTI360Bbox3D()
-# obj.parseBbox(child)
-# # obj.parseVertices(child)
-# name = child.find('label').text
-#     label_name.append(name)
-# # if obj.start_frame < 10030 and obj.end_frame > 10030:
-# center = np.array(obj.T, dtype=np.float64)
-# dist = np.linalg.norm(center - CENTER_REF)
-# if dist < 7:
-# print(f"Object ID: {obj.name}, Start Frame: {obj.start_frame}, End Frame: {obj.end_frame},self.annotationId: {obj.annotationId},{obj.timestamp},{obj.T}")
-# objs_name.append(obj.name)
-# print(len(objs_name))
-# print(set(objs_name))
-# print(set(label_name))
-# # print(obj.Rm)
-# # print(Sigma)
-# names = []
-# for child in root:
-# label = child.find('label').text
-# if child.find('transform') is None:
-# continue
-# names.append(label)
-# print(set(names))
-
-from scipy.spatial.transform import Rotation as R
-import numpy as np
-from pathlib import Path as PATH
-
-def get_rotation_matrix(roll,pitch,yaw):
-    # Intrinsic X-Y'-Z'' rotation (extrinsic Z-Y-X): R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
- R_x = np.array(
- [
- [1, 0, 0],
- [0, np.cos(roll), -np.sin(roll)],
- [0, np.sin(roll), np.cos(roll)],
- ],
- dtype=np.float64,
- )
- R_y = np.array(
- [
- [np.cos(pitch), 0, np.sin(pitch)],
- [0, 1, 0],
- [-np.sin(pitch), 0, np.cos(pitch)],
- ],
- dtype=np.float64,
- )
- R_z = np.array(
- [
- [np.cos(yaw), -np.sin(yaw), 0],
- [np.sin(yaw), np.cos(yaw), 0],
- [0, 0, 1],
- ],
- dtype=np.float64,
- )
- return R_x @ R_y @ R_z
-
-oxts_path = PATH("/data/jbwang/d123/data_poses/2013_05_28_drive_0000_sync/oxts/data/" )
-pose_file = PATH("/nas/datasets/KITTI-360/data_poses/2013_05_28_drive_0000_sync/poses.txt")
-poses = np.loadtxt(pose_file)
-poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
-
-pose_idx = 0
-poses_time_len = len(poses_time)
-
-from pyquaternion import Quaternion
-
-for idx in range(len(list(oxts_path.glob("*.txt")))):
- oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
- oxts_data = np.loadtxt(oxts_path_file)
- while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
- pose_idx += 1
- pos = pose_idx
-
- r00, r01, r02 = poses[pos, 1:4]
- r10, r11, r12 = poses[pos, 5:8]
- r20, r21, r22 = poses[pos, 9:12]
- R_mat = np.array([[r00, r01, r02],
- [r10, r11, r12],
- [r20, r21, r22]], dtype=np.float64)
- calib = np.array([[1.0, 0.0, 0.0],
- [0.0, -1.0, 0.0],
- [0.0, 0.0, -1.0]], dtype=np.float64)
- R_mat = R_mat @ calib
- from d123.geometry.rotation import EulerAngles
- if idx <= 300:
- # print("R_mat",R_mat)
-
- new_yaw, new_pitch, new_roll = Quaternion(matrix=R_mat[:3, :3]).yaw_pitch_roll
- R_rpy = EulerAngles.from_array(np.array([new_roll, new_pitch, new_yaw])).rotation_matrix
- # print("R from yaw_pitch_roll", R_rpy)
- print(R_mat - R_rpy) # renamed from R to avoid shadowing scipy's Rotation alias
- # new_yaw,new_pitch,new_roll = R.from_matrix(R_mat).as_euler('yxz', degrees=False)
- # print("new",new_roll,new_pitch,new_yaw)
- # print("roll,pitch,yaw",oxts_data[3:6]) # 前6个元素是位置和速度
- # roll, pitch, yaw = oxts_data[3:6]
- # print("true",get_rotation_matrix(roll,pitch,yaw))
- # print("new",roll,pitch,yaw)
\ No newline at end of file
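The loop above rebuilds a rotation from yaw/pitch/roll and diffs it against the pose matrix. A hedged round-trip check of the same intrinsic convention, using only NumPy and SciPy (the angle values are illustrative):

```python
# Round-trip check for the intrinsic X-Y'-Z'' convention used above
# (R = R_x(roll) @ R_y(pitch) @ R_z(yaw)); assumes SciPy.
import numpy as np
from scipy.spatial.transform import Rotation

roll, pitch, yaw = 0.02, -0.01, 1.3
M = Rotation.from_euler("XYZ", [roll, pitch, yaw]).as_matrix()  # intrinsic X-Y'-Z''

# Recover the angles with the matching axis string and compare.
recovered = Rotation.from_matrix(M).as_euler("XYZ")
assert np.allclose(recovered, [roll, pitch, yaw])
print("round-trip OK:", recovered)
```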
diff --git a/notebooks/dataset/jbwang_test.py b/notebooks/dataset/jbwang_test.py
deleted file mode 100644
index c37d8d40..00000000
--- a/notebooks/dataset/jbwang_test.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# s3_uri = "/data/jbwang/d123/data/nuplan_mini_train/2021.10.11.07.12.18_veh-50_00211_00304.arrow"
-# s3_uri = "/data/jbwang/d123/data/nuplan_private_test/2021.09.22.13.20.34_veh-28_01446_01583.arrow"
-# s3_uri = "/data/jbwang/d123/data/carla/_Rep0_routes_validation1_route0_07_23_14_33_15.arrow"
-# s3_uri = "/data/jbwang/d123/data/nuplan_mini_val/2021.06.07.12.54.00_veh-35_01843_02314.arrow"
-# s3_uri = "/data/jbwang/d123/data2/kitti360_c2e_train/2013_05_28_drive_0000_sync_c2e.arrow"
-s3_uri = "/data/jbwang/d123/data2/kitti360_detection_all_test/2013_05_28_drive_0000_sync.arrow"
-
-
-import pyarrow as pa
-import pyarrow.fs as fs
-import pyarrow.dataset as ds
-
-import os
-
-s3_fs = fs.S3FileSystem()
-from d123.common.utils.timer import Timer
-
-
-timer = Timer()
-timer.start()
-
-dataset = ds.dataset(f"{s3_uri}", format="ipc")
-timer.log("1. Dataset loaded")
-
-# Get all column names and remove the ones you want to drop
-all_columns = dataset.schema.names
-# print("all_columns", all_columns)
-# print("Schema:")
-# print(dataset.schema)
-# columns_to_keep = [col for col in all_columns if col not in ["front_cam_demo", "front_cam_transform"]]
-timer.log("2. Columns filtered")
-
-table = dataset.to_table(columns=all_columns)
-# print("table",table)
-# print(table["token"])
-for col in table.column_names:
- if col == "lidar":
- continue
- print(f"Column : {col}, Type: {table.schema.field(col).type}")
- tokens = table["detections_velocity"] # or table.column("token")
- # tokens = table["detections_type"]
- # print(tokens)
- # print(len(tokens))
- result = tokens.slice(1470, 40).to_pylist()
- # for item in result:
- # print(len(item))
-print(result)
-# print(table["traffic_light_ids"])
-timer.log("3. Table created")
-# Save locally
-# with pa.ipc.new_file("filtered_file.arrow", table.schema) as writer:
-# writer.write_table(table)
-timer.log("4. Table saved locally")
-
-timer.end()
-timer.stats(verbose=False)
-
-# Inspect the table structure and contents of the nuplan database
-
-# from pathlib import Path
-# from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-# from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
-# from sqlalchemy import inspect, select
-# from sqlalchemy.orm import Session
-# from sqlalchemy import func
-# from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
-
-# NUPLAN_DATA_ROOT = Path("/nas/datasets/nuplan/") # set to your actual path
-# log_path = "/nas/datasets/nuplan/nuplan-v1.1/splits/mini/2021.05.12.22.00.38_veh-35_01008_01518.db"
-
-# db = NuPlanDB(NUPLAN_DATA_ROOT, log_path, None)
-# # print(db.log)
-# print(db.log.map_version)
-# # print("log.cameras",db.log.cameras)
-# # print("Log name:", db.log_name)
-# # print("lidar",db.lidar_pc)
-# # print("scenario_tags", db.scenario_tag)
-# # print(db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp)).first())
-
-# # persp = Path("/nas/datasets/KITTI-360/calibration/perspective.txt")
-# # with open(persp, "r") as f:
-# # lines = [ln.strip() for ln in f if ln.strip()]
-# # print(lines)
-
-# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_kitti360_camera_metadata
-
-# print(get_kitti360_camera_metadata())
-
-
-
-# from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import _read_timestamps
-# result = _read_timestamps("2013_05_28_drive_0000_sync")
-# print(len(result))
-# print([result[0].time_us])
\ No newline at end of file
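For reference, the Arrow access pattern exercised by the deleted script boils down to a few lines of pyarrow. A minimal sketch; the file path is a placeholder and the column name mirrors the script above:

```python
# Open an Arrow IPC log as a dataset, inspect the schema, and slice a column.
import pyarrow.dataset as ds

dataset = ds.dataset("/path/to/log.arrow", format="ipc")  # hypothetical path
table = dataset.to_table()  # pass columns=[...] to load a subset

for name in table.column_names:
    print(name, table.schema.field(name).type)

# ChunkedArray slicing is zero-copy; to_pylist() converts to Python objects.
print(table["detections_velocity"].slice(0, 10).to_pylist())
```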
diff --git a/notebooks/gym/jbwang_test.py b/notebooks/gym/jbwang_test.py
deleted file mode 100644
index 663e2899..00000000
--- a/notebooks/gym/jbwang_test.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from d123.dataset.scene.scene_builder import ArrowSceneBuilder
-from d123.dataset.scene.scene_filter import SceneFilter
-
-from d123.common.multithreading.worker_sequential import Sequential
-# from d123.common.multithreading.worker_ray import RayDistributed
-
-import os, psutil
-
-from pathlib import Path
-from typing import Optional, Tuple
-
-import matplotlib.animation as animation
-import matplotlib.pyplot as plt
-from tqdm import tqdm
-
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.common.geometry.base import Point2D, StateSE2
-from d123.common.geometry.bounding_box.bounding_box import BoundingBoxSE2
-from d123.common.visualization.color.default import EGO_VEHICLE_CONFIG
-from d123.common.visualization.matplotlib.observation import (
- add_bounding_box_to_ax,
- add_box_detections_to_ax,
- add_default_map_on_ax,
- add_traffic_lights_to_ax,
- add_ego_vehicle_to_ax,
-)
-from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper
-from d123.dataset.scene.abstract_scene import AbstractScene
-import io
-from PIL import Image
-
-
-
-def _plot_scene_on_ax(
- ax: plt.Axes,
- map_api: AbstractMap,
- ego_state: EgoStateSE2,
- initial_ego_state: Optional[EgoStateSE2],
- box_detections: BoxDetectionWrapper,
- traffic_light_detections: TrafficLightDetectionWrapper,
- radius: float = 120,
-) -> plt.Axes:
-
- if initial_ego_state is not None:
- point_2d = initial_ego_state.center.point_2d
- else:
- point_2d = ego_state.center.point_2d
- add_default_map_on_ax(ax, map_api, point_2d, radius=radius)
- add_traffic_lights_to_ax(ax, traffic_light_detections, map_api)
-
- add_box_detections_to_ax(ax, box_detections)
- add_ego_vehicle_to_ax(ax, ego_state)
-
- ax.set_xlim(point_2d.x - radius, point_2d.x + radius)
- ax.set_ylim(point_2d.y - radius, point_2d.y + radius)
-
- ax.set_aspect("equal", adjustable="box")
- return ax
-
-
-def plot_scene_to_image(
- map_api: AbstractMap,
- ego_state: EgoStateSE2,
- initial_ego_state: Optional[EgoStateSE2],
- box_detections: BoxDetectionWrapper,
- traffic_light_detections: TrafficLightDetectionWrapper,
- radius: float = 120,
- figsize: Tuple[int, int] = (8, 8),
-) -> Image:
-
- fig, ax = plt.subplots(figsize=figsize)
- _plot_scene_on_ax(ax, map_api, ego_state, initial_ego_state, box_detections, traffic_light_detections, radius)
- ax.set_aspect("equal", adjustable="box")
- plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
- # plt.tight_layout()
-
- buf = io.BytesIO()
- fig.savefig(buf, format="png", bbox_inches="tight")
- plt.close(fig)
- buf.seek(0)
- img = Image.open(buf)
- return img
-
-
-def print_memory_usage():
- process = psutil.Process(os.getpid())
- memory_info = process.memory_info()
- print(f"Memory usage: {memory_info.rss / 1024 ** 2:.2f} MB")
-
-
-split = "kitti360_detection_all_and_vel"
-scene_tokens = None
-log_names = None
-
-scene_filter = SceneFilter(
- split_names=[split], log_names=log_names, scene_tokens=scene_tokens, duration_s=15.1, history_s=1.0
-)
-scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data2/")
-worker = Sequential()
-# worker = RayDistributed()
-scenes = scene_builder.get_scenes(scene_filter, worker)
-
-print(len(scenes))
-
-for scene in scenes[:10]:
- print(scene.log_name, scene.token)
-
-from d123.dataset.arrow.conversion import DetectionType
-from d123.simulation.gym.gym_env import GymEnvironment
-from d123.simulation.observation.agents_observation import _filter_agents_by_type
-
-import time
-
-images = []
-agent_rollouts = []
-plot: bool = True
-action = [1.0, -0.0] # Placeholder action, replace with actual action logic
-env = GymEnvironment(scenes)
-
-start = time.time()
-
-map_api, ego_state, detection_observation, current_scene = env.reset(scenes[1460])
-initial_ego_state = ego_state
-cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])
-agent_rollouts.append(BoxDetectionWrapper(cars))
-if plot:
- images.append(
- plot_scene_to_image(
- map_api,
- ego_state,
- initial_ego_state,
- detection_observation.box_detections,
- detection_observation.traffic_light_detections,
- )
- )
-
-
-for i in range(160):
- ego_state, detection_observation, end = env.step(action)
- cars, _, _ = _filter_agents_by_type(detection_observation.box_detections, detection_types=[DetectionType.VEHICLE])
- agent_rollouts.append(BoxDetectionWrapper(cars))
- if plot:
- images.append(
- plot_scene_to_image(
- map_api,
- ego_state,
- initial_ego_state,
- detection_observation.box_detections,
- detection_observation.traffic_light_detections,
- )
- )
- if end:
- print("End of scene reached.")
- break
-
-time_s = time.time() - start
-print(time_s)
-print(151 / time_s) # steps per second
-
-import numpy as np
-
-
-def create_gif(images, output_path, duration=100):
- """
- Create a GIF from a list of PIL images.
-
- Args:
- images (list): List of PIL.Image objects.
- output_path (str): Path to save the GIF.
- duration (int): Duration between frames in milliseconds.
- """
- if images:
- print(len(images))
- images_p = [img.convert("P", palette=Image.ADAPTIVE) for img in images]
- images_p[0].save(output_path, save_all=True, append_images=images_p[1:], duration=duration, loop=0)
-
-
-create_gif(images, f"/data/jbwang/d123/data2/{split}_{current_scene.token}.gif", duration=20)
\ No newline at end of file
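The create_gif helper above relies on Pillow's animated-GIF writer. A self-contained usage sketch with synthetic frames (duration is milliseconds per frame, so the 20 ms used above is roughly 50 fps):

```python
# Write an animated GIF from PIL frames, as in create_gif above.
# Frames here are synthetic colour swatches purely for illustration.
from PIL import Image

frames = [Image.new("RGB", (64, 64), ((i * 16) % 256, 32, 128)) for i in range(16)]
frames_p = [f.convert("P", palette=Image.ADAPTIVE) for f in frames]
frames_p[0].save(
    "demo.gif",
    save_all=True,
    append_images=frames_p[1:],
    duration=20,  # ms per frame (~50 fps)
    loop=0,       # loop forever
)
```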
diff --git a/notebooks/jbwang_viz_test.py b/notebooks/jbwang_viz_test.py
deleted file mode 100644
index 73f05dbf..00000000
--- a/notebooks/jbwang_viz_test.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# from typing import Tuple
-
-# import matplotlib.pyplot as plt
-
-# from nuplan.planning.utils.multithreading.worker_sequential import Sequential
-
-# from d123.dataset.scene.scene_builder import ArrowSceneBuilder
-# from d123.dataset.scene.scene_filter import SceneFilter
-# from d123.dataset.scene.abstract_scene import AbstractScene
-
-# from typing import Dict
-# from d123.common.datatypes.sensor.camera import CameraType
-# from d123.common.visualization.matplotlib.camera import add_camera_ax
-# from d123.common.visualization.matplotlib.camera import add_box_detections_to_camera_ax
-
-# # split = "nuplan_private_test"
-# # log_names = ["2021.09.29.17.35.58_veh-44_00066_00432"]
-
-
-
-
-# # splits = ["carla"]
-# splits = ["nuplan_private_test"]
-# # splits = ["wopd_train"]
-# # log_names = None
-
-
-
-# # splits = ["nuplan_private_test"]
-# log_names = None
-
-# scene_tokens = None
-
-# scene_filter = SceneFilter(
-# split_names=splits,
-# log_names=log_names,
-# scene_tokens=scene_tokens,
-# duration_s=19,
-# history_s=0.0,
-# timestamp_threshold_s=20,
-# shuffle=False,
-# camera_types=[CameraType.CAM_F0],
-# )
-# scene_builder = ArrowSceneBuilder("/data/jbwang/d123/data/")
-# worker = Sequential()
-# # worker = RayDistributed()
-# scenes = scene_builder.get_scenes(scene_filter, worker)
-
-# print(f"Found {len(scenes)} scenes")
-
-
-# from typing import List, Optional, Tuple
-# import matplotlib.pyplot as plt
-# import numpy as np
-# from d123.common.geometry.base import Point2D
-# from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10
-# from d123.common.visualization.color.config import PlotConfig
-# from d123.common.visualization.color.default import CENTERLINE_CONFIG, MAP_SURFACE_CONFIG, ROUTE_CONFIG
-# from d123.common.visualization.matplotlib.observation import (
-# add_box_detections_to_ax,
-# add_default_map_on_ax,
-# add_ego_vehicle_to_ax,
-# add_traffic_lights_to_ax,
-# )
-# from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax
-# from d123.dataset.maps.abstract_map import AbstractMap
-# from d123.dataset.maps.abstract_map_objects import AbstractLane
-# from d123.dataset.maps.map_datatypes import MapLayer
-# from d123.dataset.scene.abstract_scene import AbstractScene
-
-
-# import shapely.geometry as geom
-
-# LEFT_CONFIG: PlotConfig = PlotConfig(
-# fill_color=TAB_10[2],
-# fill_color_alpha=1.0,
-# line_color=TAB_10[2],
-# line_color_alpha=0.5,
-# line_width=1.0,
-# line_style="-",
-# zorder=3,
-# )
-
-# RIGHT_CONFIG: PlotConfig = PlotConfig(
-# fill_color=TAB_10[3],
-# fill_color_alpha=1.0,
-# line_color=TAB_10[3],
-# line_color_alpha=0.5,
-# line_width=1.0,
-# line_style="-",
-# zorder=3,
-# )
-
-
-# LANE_CONFIG: PlotConfig = PlotConfig(
-# fill_color=BLACK,
-# fill_color_alpha=1.0,
-# line_color=BLACK,
-# line_color_alpha=0.0,
-# line_width=0.0,
-# line_style="-",
-# zorder=5,
-# )
-
-# ROAD_EDGE_CONFIG: PlotConfig = PlotConfig(
-# fill_color=DARKER_GREY.set_brightness(0.0),
-# fill_color_alpha=1.0,
-# line_color=DARKER_GREY.set_brightness(0.0),
-# line_color_alpha=1.0,
-# line_width=1.0,
-# line_style="-",
-# zorder=3,
-# )
-
-# ROAD_LINE_CONFIG: PlotConfig = PlotConfig(
-# fill_color=DARKER_GREY,
-# fill_color_alpha=1.0,
-# line_color=NEW_TAB_10[5],
-# line_color_alpha=1.0,
-# line_width=1.5,
-# line_style="-",
-# zorder=3,
-# )
-
-
-# def add_debug_map_on_ax(
-# ax: plt.Axes,
-# map_api: AbstractMap,
-# point_2d: Point2D,
-# radius: float,
-# route_lane_group_ids: Optional[List[int]] = None,
-# ) -> None:
-# layers: List[MapLayer] = [
-# MapLayer.LANE,
-# MapLayer.LANE_GROUP,
-# MapLayer.GENERIC_DRIVABLE,
-# MapLayer.CARPARK,
-# MapLayer.CROSSWALK,
-# MapLayer.INTERSECTION,
-# MapLayer.WALKWAY,
-# MapLayer.ROAD_EDGE,
-# MapLayer.ROAD_LINE,
-# ]
-# x_min, x_max = point_2d.x - radius, point_2d.x + radius
-# y_min, y_max = point_2d.y - radius, point_2d.y + radius
-# patch = geom.box(x_min, y_min, x_max, y_max)
-# map_objects_dict = map_api.query(geometry=patch, layers=layers, predicate="intersects")
-
-# done = False
-# for layer, map_objects in map_objects_dict.items():
-# for map_object in map_objects:
-# try:
-# if layer in [
-# # MapLayer.GENERIC_DRIVABLE,
-# # MapLayer.CARPARK,
-# # MapLayer.CROSSWALK,
-# # MapLayer.INTERSECTION,
-# # MapLayer.WALKWAY,
-# ]:
-# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
-
-# # if layer in [MapLayer.LANE_GROUP]:
-# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
-
-# if layer in [MapLayer.LANE]:
-# map_object: AbstractLane
-# if map_object.right_lane is not None and map_object.left_lane is not None and not done:
-# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG)
-# add_shapely_polygon_to_ax(ax, map_object.right_lane.shapely_polygon, RIGHT_CONFIG)
-# add_shapely_polygon_to_ax(ax, map_object.left_lane.shapely_polygon, LEFT_CONFIG)
-# done = True
-# else:
-# add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, MAP_SURFACE_CONFIG[layer])
-
-
-# # add_shapely_linestring_to_ax(ax, map_object.right_boundary.linestring, RIGHT_CONFIG)
-# # add_shapely_linestring_to_ax(ax, map_object.left_boundary.linestring, LEFT_CONFIG)
-# # add_shapely_polygon_to_ax(ax, map_object.shapely_polygon, LANE_CONFIG)
-
-# # centroid = map_object.shapely_polygon.centroid
-# # ax.text(
-# # centroid.x,
-# # centroid.y,
-# # str(map_object.id),
-# # horizontalalignment="center",
-# # verticalalignment="center",
-# # fontsize=8,
-# # bbox=dict(facecolor="white", alpha=0.7, boxstyle="round,pad=0.2"),
-# # )
-# # if layer in [MapLayer.ROAD_EDGE]:
-# # add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, ROAD_EDGE_CONFIG)
-# # edge_lengths.append(map_object.polyline_3d.linestring.length)
-
-# if layer in [MapLayer.ROAD_LINE]:
-# line_type = int(map_object.road_line_type)
-# plt_config = PlotConfig(
-# fill_color=NEW_TAB_10[line_type % len(NEW_TAB_10)],
-# fill_color_alpha=1.0,
-# line_color=NEW_TAB_10[line_type % len(NEW_TAB_10)],
-# line_color_alpha=1.0,
-# line_width=1.5,
-# line_style="-",
-# zorder=3,
-# )
-# add_shapely_linestring_to_ax(ax, map_object.polyline_3d.linestring, plt_config)
-
-# except Exception:
-# import traceback
-
-# print(f"Error adding map object of type {layer.name} and id {map_object.id}")
-# traceback.print_exc()
-
-# ax.set_title(f"Map: {map_api.map_name}")
-
-
-# def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:
-
-# ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
-# box_detections = scene.get_box_detections_at_iteration(iteration)
-
-# point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d
-# add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
-# # add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)
-# # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)
-
-# add_box_detections_to_ax(ax, box_detections)
-# add_ego_vehicle_to_ax(ax, ego_vehicle_state)
-
-# zoom = 1.0
-# ax.set_xlim(point_2d.x - radius * zoom, point_2d.x + radius * zoom)
-# ax.set_ylim(point_2d.y - radius * zoom, point_2d.y + radius * zoom)
-
-# ax.set_aspect("equal", adjustable="box")
-# return ax
-
-
-# def plot_scene_at_iteration(
-# scene: AbstractScene, iteration: int = 0, radius: float = 80
-# ) -> Tuple[plt.Figure, plt.Axes]:
-
-# size = 15
-
-# fig, ax = plt.subplots(figsize=(size, size))
-# _plot_scene_on_ax(ax, scene, iteration, radius)
-# return fig, ax
-
-
-# scene_index = 1
-# fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=100, radius=100)
-
-# # fig.savefig(f"/home/daniel/scene_{scene_index}_iteration_1.pdf", dpi=300, bbox_inches="tight")
-
From b4d06bdcac7e58c891728d5289e13df1b345f825 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Thu, 28 Aug 2025 15:30:26 +0800
Subject: [PATCH 030/145] ready to push
---
d123/common/visualization/viser/server.py | 4 +-
.../kitti_360/kitti_360_data_converter.py | 51 ++++++++++---------
.../kitti_360/preprocess_detection.py | 2 +-
3 files changed, 29 insertions(+), 28 deletions(-)
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 990a90dd..6cba5dd5 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -43,9 +43,9 @@
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
+VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L]
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
-VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_STEREO_L]
CAMERA_SCALE: float = 1.0
# Lidar config:
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 2cc40675..1b967fca 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -56,11 +56,11 @@
DIR_POSES = "data_poses"
DIR_CALIB = "calibration"
-#TODO PATH_2D_RAW_ROOT
-# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
+# PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw")
PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
@@ -406,7 +406,9 @@ def _write_recording_table(
write_arrow_table(recording_table, log_file_path)
def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
- # unix
+ """
+ Read KITTI-360 timestamps for the given sequence and return Unix epoch timestamps.
+ """
ts_files = [
PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "timestamps.txt",
PATH_2D_RAW_ROOT / log_name / "image_00" / "timestamps.txt",
@@ -449,10 +451,9 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
raise FileNotFoundError(f"Pose file not found: {pose_file}")
poses = np.loadtxt(pose_file)
poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
-
- #TODO
- #oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
- oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
+
+ oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
+ # oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
pose_idx = 0
poses_time_len = len(poses_time)
@@ -632,29 +633,29 @@ def _extract_cameras(
elif cam_dir_name in ["image_02", "image_03"]:
img_path_png = PATH_2D_RAW_ROOT / log_name / cam_dir_name / "data_rgb" / f"{idx:010d}.png"
- if img_path_png.exists():
- cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
- if not cam2pose_txt.exists():
- raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
-
- lastrow = np.array([0,0,0,1]).reshape(1,4)
-
- with open(cam2pose_txt, 'r') as f:
- for line in f:
- parts = line.strip().split()
- key = parts[0][:-1]
- if key == cam_dir_name:
- values = list(map(float, parts[1:]))
- matrix = np.array(values).reshape(3, 4)
- cam2pose = np.concatenate((matrix, lastrow))
- cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
+ cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+ if not cam2pose_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+ lastrow = np.array([0,0,0,1]).reshape(1,4)
+ with open(cam2pose_txt, 'r') as f:
+ for line in f:
+ parts = line.strip().split()
+ key = parts[0][:-1]
+ if key == cam_dir_name:
+ values = list(map(float, parts[1:]))
+ matrix = np.array(values).reshape(3, 4)
+ cam2pose = np.concatenate((matrix, lastrow))
+ cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
+ if img_path_png.exists():
if data_converter_config.camera_store_option == "path":
camera_data = str(img_path_png), cam2pose.flatten().tolist()
elif data_converter_config.camera_store_option == "binary":
with open(img_path_png, "rb") as f:
camera_data = f.read(), cam2pose
else:
- raise FileNotFoundError(f"Camera image not found: {img_path_png}")
+ #TODO
+ camera_data = None, cam2pose.flatten().tolist()
camera_dict[camera_type] = camera_data
return camera_dict
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
index e45e76d9..8b7c284f 100644
--- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -163,7 +163,7 @@ def process_detection(
for obj in static_objs:
records.append(obj.valid_frames)
if output_dir is None:
- output_dir = PATH_3D_BBOX_ROOT / "preprocessed"
+ output_dir = PATH_3D_BBOX_ROOT / "preprocess"
output_dir.mkdir(parents=True, exist_ok=True)
out_path = output_dir / f"{log_name}_detection_preprocessed.pkl"
payload = {
From 493c02993ec34be3c6cef11c80186061af62deaa Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 28 Aug 2025 10:32:17 +0200
Subject: [PATCH 031/145] Make viser server not crash if cameras or lidar is
not available. (#49)
---
d123/common/visualization/viser/server.py | 45 ++--
d123/common/visualization/viser/utils.py | 19 +-
.../transform/transform_quaternion_se3.py | 237 ++++++++++++++++++
d123/geometry/utils/rotation_utils.py | 4 -
.../common/scene_filter/all_scenes.yaml | 5 +
5 files changed, 280 insertions(+), 30 deletions(-)
create mode 100644 d123/geometry/transform/transform_quaternion_se3.py
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 686d3746..c9ce3601 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -1,6 +1,7 @@
import time
from typing import Dict, List, Literal
+import numpy as np
import trimesh
import viser
@@ -8,6 +9,7 @@
from d123.common.datatypes.sensor.lidar import LiDARType
from d123.common.visualization.viser.utils import (
get_bounding_box_meshes,
+ get_camera_if_available,
get_camera_values,
get_lidar_points,
get_map_meshes,
@@ -186,23 +188,26 @@ def _(_) -> None:
current_frame_handle = mew_frame_handle
for camera_type in VISUALIZE_CAMERA_GUI:
- if camera_type in scene.available_camera_types:
- camera_gui_handles[camera_type].image = scene.get_camera_at_iteration(
- gui_timestep.value, camera_type
- ).image
+ camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
+ if camera is not None:
+ camera_gui_handles[camera_type].image = camera.image
for camera_type in VISUALIZE_CAMERA_FRUSTUM:
- if camera_type in scene.available_camera_types:
- camera_position, camera_quaternion, camera = get_camera_values(
- scene, camera_type, gui_timestep.value
- )
-
+ camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
+ if camera is not None:
+ camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value)
camera_frustum_handles[camera_type].position = camera_position.array
camera_frustum_handles[camera_type].wxyz = camera_quaternion.q
camera_frustum_handles[camera_type].image = camera.image
if LIDAR_AVAILABLE:
- points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
+ try:
+ points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
+ except Exception as e:
+ print(f"Error getting lidar points: {e}")
+ points = np.zeros((0, 3))
+ colors = np.zeros((0, 3))
+
gui_lidar.points = points
gui_lidar.colors = colors
@@ -221,19 +226,19 @@ def _(_) -> None:
camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {}
for camera_type in VISUALIZE_CAMERA_GUI:
- if camera_type in scene.available_camera_types:
+ camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
+ if camera is not None:
with self.server.gui.add_folder(f"Camera {camera_type.serialize()}"):
camera_gui_handles[camera_type] = self.server.gui.add_image(
- image=scene.get_camera_at_iteration(gui_timestep.value, camera_type).image,
+ image=camera.image,
label=camera_type.serialize(),
format="jpeg",
)
for camera_type in VISUALIZE_CAMERA_FRUSTUM:
- if camera_type in scene.available_camera_types:
- camera_position, camera_quaternion, camera = get_camera_values(
- scene, camera_type, gui_timestep.value
- )
+ camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
+ if camera is not None:
+ camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value)
camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum(
f"camera_frustum_{camera_type.serialize()}",
fov=camera.metadata.fov_y,
@@ -245,7 +250,13 @@ def _(_) -> None:
)
if LIDAR_AVAILABLE:
- points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
+ try:
+ points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
+ except Exception as e:
+ print(f"Error getting lidar points: {e}")
+ points = np.zeros((0, 3))
+ colors = np.zeros((0, 3))
+
gui_lidar = self.server.scene.add_point_cloud(
name="LiDAR",
points=points,
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index ad49b88a..16717654 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -1,4 +1,4 @@
-from typing import List, Tuple
+from typing import List, Optional, Tuple
import numpy as np
import numpy.typing as npt
@@ -216,22 +216,23 @@ def _create_lane_mesh_from_boundary_arrays(
return mesh
-def get_camera_values(
- scene: AbstractScene, camera_type: CameraType, iteration: int
-) -> Tuple[Point3D, Quaternion, Camera]:
+def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, iteration: int) -> Optional[Camera]:
+ camera: Optional[Camera] = None
+ if camera_type in scene.available_camera_types:
+ camera: Camera = scene.get_camera_at_iteration(iteration, camera_type)
+ return camera
+
+
+def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> Tuple[Point3D, Quaternion]:
initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d
rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3
- camera = scene.get_camera_at_iteration(iteration, camera_type)
-
rear_axle_array = rear_axle.array
rear_axle_array[:3] -= initial_point_3d.array
rear_axle = StateSE3.from_array(rear_axle_array)
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
- # Get the rotation matrix of the rear axle pose
-
ego_transform = rear_axle.transformation_matrix
camera_transform = ego_transform @ camera_to_ego
@@ -240,7 +241,7 @@ def get_camera_values(
camera_position = Point3D(*camera_transform[:3, 3])
camera_rotation = Quaternion(matrix=camera_transform[:3, :3])
- return camera_position, camera_rotation, camera
+ return camera_position, camera_rotation
def get_lidar_points(
diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py
new file mode 100644
index 00000000..9a74b601
--- /dev/null
+++ b/d123/geometry/transform/transform_quaternion_se3.py
@@ -0,0 +1,237 @@
+# TODO: Properly implement and test these functions
+
+# from typing import Union
+
+# import numpy as np
+# import numpy.typing as npt
+
+# from d123.geometry import Vector3D
+# from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
+# from d123.geometry.se import QuaternionSE3
+
+
+# def translate_qse3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+# """Translates a QuaternionSE3 state along the Z-axis.
+
+# :param state_se3: The QuaternionSE3 state to translate.
+# :param distance: The distance to translate along the Z-axis.
+# :return: The translated QuaternionSE3 state.
+# """
+# R = state_se3.rotation_matrix
+# z_axis = R[:, 2]
+
+# state_se3_array = state_se3.array.copy()
+# state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+# return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+# def translate_qse3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+# """Translates a QuaternionSE3 state along the Y-axis.
+
+# :param state_se3: The QuaternionSE3 state to translate.
+# :param distance: The distance to translate along the Y-axis.
+# :return: The translated QuaternionSE3 state.
+# """
+# R = state_se3.rotation_matrix
+# y_axis = R[:, 1]
+
+# state_se3_array = state_se3.array.copy()
+# state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+# return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+# def translate_qse3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+# """Translates a QuaternionSE3 state along the X-axis.
+
+# :param state_se3: The QuaternionSE3 state to translate.
+# :param distance: The distance to translate along the X-axis.
+# :return: The translated QuaternionSE3 state.
+# """
+# R = state_se3.rotation_matrix
+# x_axis = R[:, 0]
+
+# state_se3_array = state_se3.array.copy()
+# state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+# return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+# def translate_qse3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3:
+# """Translates a QuaternionSE3 state along a vector in the body frame.
+
+# :param state_se3: The QuaternionSE3 state to translate.
+# :param vector_3d: The vector to translate along in the body frame.
+# :return: The translated QuaternionSE3 state.
+# """
+# R = state_se3.rotation_matrix
+# world_translation = R @ vector_3d.array
+
+# state_se3_array = state_se3.array.copy()
+# state_se3_array[QuaternionSE3Index.XYZ] += world_translation
+# return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+# def convert_absolute_to_relative_qse3_array(
+# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+# """Converts a QuaternionSE3 array from the absolute frame to the relative frame.
+
+# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
+# :param se3_array: The QuaternionSE3 array in the absolute frame [N, 7].
+# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+# :return: The QuaternionSE3 array in the relative frame [N, 7].
+# """
+# if isinstance(origin, QuaternionSE3):
+# origin_ = origin
+# elif isinstance(origin, np.ndarray):
+# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+# origin_ = QuaternionSE3.from_array(origin)
+# else:
+# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+# assert se3_array.ndim >= 1
+# assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+# t_origin = origin_.point_3d.array
+# R_origin = origin_.rotation_matrix
+
+# # Extract absolute positions and quaternions
+# abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+# q_origin = origin_.quaternion
+
+# # Compute relative quaternions: q_rel = q_origin^-1 * q_abs
+# # (for a unit quaternion the inverse is the conjugate)
+# q_origin_inv = np.array([q_origin[0], -q_origin[1], -q_origin[2], -q_origin[3]])
+# if abs_quaternions.ndim == 1:
+# rel_quaternions = _quaternion_multiply(q_origin_inv, abs_quaternions)
+# else:
+# rel_quaternions = np.array([_quaternion_multiply(q_origin_inv, q) for q in abs_quaternions])
+
+
+# # Prepare output array
+# rel_se3_array = np.zeros_like(se3_array)
+# rel_se3_array[..., QuaternionSE3Index.XYZ] = (se3_array[..., QuaternionSE3Index.XYZ] - t_origin) @ R_origin
+# rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
+
+# return rel_se3_array
+
+
+# def convert_relative_to_absolute_qse3_array(
+# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+# """Converts a QuaternionSE3 array from the relative frame to the absolute frame.
+
+# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
+# :param se3_array: The QuaternionSE3 array in the relative frame [N, 7].
+# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+# :return: The QuaternionSE3 array in the absolute frame [N, 7].
+# """
+# if isinstance(origin, QuaternionSE3):
+# t_origin = origin.translation
+# q_origin = origin.quaternion
+# R_origin = origin.rotation_matrix
+# elif isinstance(origin, np.ndarray):
+# assert origin.ndim == 1 and origin.shape[-1] == 7
+# t_origin = origin[:3]
+# q_origin = origin[3:]
+# origin_quat_se3 = QuaternionSE3.from_array(origin)
+# R_origin = origin_quat_se3.rotation_matrix
+# else:
+# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+# assert se3_array.ndim >= 1
+# assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+# # Extract relative positions and quaternions
+# rel_positions = se3_array[..., QuaternionSE3Index.XYZ]
+# rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+
+# # Compute absolute positions: R_origin @ p_rel + t_origin
+# abs_positions = (R_origin @ rel_positions.T).T + t_origin
+
+# # Compute absolute quaternions: q_abs = q_origin * q_rel
+# if rel_quaternions.ndim == 1:
+# abs_quaternions = _quaternion_multiply(q_origin, rel_quaternions)
+# else:
+# abs_quaternions = np.array([_quaternion_multiply(q_origin, q) for q in rel_quaternions])
+
+# # Prepare output array
+# abs_se3_array = se3_array.copy()
+# abs_se3_array[..., :3] = abs_positions
+# abs_se3_array[..., 3:] = abs_quaternions
+
+# return abs_se3_array
+
+
+# def convert_absolute_to_relative_points_q3d_array(
+# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+# """Converts 3D points from the absolute frame to the relative frame.
+
+# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
+# :param points_3d_array: The 3D points in the absolute frame [N, 3].
+# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+# :return: The 3D points in the relative frame [N, 3].
+# """
+# if isinstance(origin, QuaternionSE3):
+# t_origin = origin.point_3d.array
+# R_origin_inv = origin.rotation_matrix.T
+# elif isinstance(origin, np.ndarray):
+# assert origin.ndim == 1 and origin.shape[-1] == 7
+# t_origin = origin[:3]
+# origin_quat_se3 = QuaternionSE3.from_array(origin)
+# R_origin_inv = origin_quat_se3.rotation_matrix.T
+# else:
+# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+# assert points_3d_array.ndim >= 1
+# assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+# # Transform points: R_origin^T @ (p_abs - t_origin)
+# relative_points = (points_3d_array - t_origin) @ R_origin_inv.T
+# return relative_points
+
+
+# def convert_relative_to_absolute_points_q3d_array(
+# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+# ) -> npt.NDArray[np.float64]:
+# """Converts 3D points from the relative frame to the absolute frame.
+
+# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
+# :param points_3d_array: The 3D points in the relative frame [N, 3].
+# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+# :return: The 3D points in the absolute frame [N, 3].
+# """
+# if isinstance(origin, QuaternionSE3):
+# t_origin = origin.point_3d.array
+# R_origin = origin.rotation_matrix
+# elif isinstance(origin, np.ndarray):
+# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+# t_origin = origin[QuaternionSE3Index.XYZ]
+# origin_quat_se3 = QuaternionSE3.from_array(origin)
+# R_origin = origin_quat_se3.rotation_matrix
+# else:
+# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+# assert points_3d_array.shape[-1] == 3
+
+# # Transform points: R_origin @ p_rel + t_origin
+# absolute_points = (R_origin @ points_3d_array.T).T + t_origin
+# return absolute_points
+
+
+# def _quaternion_multiply(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+# """Multiply two quaternions [w, x, y, z].
+
+# :param q1: First quaternion [w, x, y, z].
+# :param q2: Second quaternion [w, x, y, z].
+# :return: Product quaternion [w, x, y, z].
+# """
+# w1, x1, y1, z1 = q1
+# w2, x2, y2, z2 = q2
+
+# return np.array(
+# [
+# w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
+# w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
+# w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
+# w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
+# ]
+# )
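A quick sanity check for the Hamilton-product formula above ([w, x, y, z] order): the product must agree both with the element-wise formula and with rotation-matrix composition. A sketch using pyquaternion, which the project already depends on:

```python
# Validate the Hamilton product in [w, x, y, z] order two ways.
import numpy as np
from pyquaternion import Quaternion

q1 = Quaternion(axis=[0, 0, 1], angle=0.7)
q2 = Quaternion(axis=[1, 0, 0], angle=-0.3)
q_prod = q1 * q2  # Hamilton product

w1, x1, y1, z1 = q1.elements
w2, x2, y2, z2 = q2.elements
manual = np.array([
    w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
    w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
    w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
    w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
])
assert np.allclose(manual, q_prod.elements)
assert np.allclose(q_prod.rotation_matrix, q1.rotation_matrix @ q2.rotation_matrix)
print("Hamilton product consistent:", q_prod.elements)
```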
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index de12fc0f..da75c654 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -5,10 +5,6 @@
from d123.geometry.geometry_index import EulerAnglesIndex
-# TODO: move this somewhere else
-# TODO: Maybe rename wrap angle?
-# TODO: Add implementation for torch, jax, or whatever else is needed.
-
def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]:
"""
diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml
index d5e1b505..06ac76d5 100644
--- a/d123/script/config/common/scene_filter/all_scenes.yaml
+++ b/d123/script/config/common/scene_filter/all_scenes.yaml
@@ -13,3 +13,8 @@ ego_displacement_minimum_m: null
duration_s: 9.2
history_s: 3.0
+
+camera_types: null
+
+max_num_scenes: null
+shuffle: false
From 4c12d3d23012d44f7110036461e3939e70845e8f Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Fri, 29 Aug 2025 11:06:06 +0800
Subject: [PATCH 032/145] add fisheye camera
---
d123/common/datatypes/sensor/camera.py | 68 +++++++++++--------
.../kitti_360/kitti_360_data_converter.py | 6 +-
d123/dataset/scene/arrow_scene.py | 4 +-
3 files changed, 45 insertions(+), 33 deletions(-)
diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py
index c2a33d9d..e6dc60d6 100644
--- a/d123/common/datatypes/sensor/camera.py
+++ b/d123/common/datatypes/sensor/camera.py
@@ -2,7 +2,7 @@
import json
from dataclasses import dataclass
-from typing import Any, Dict
+from typing import Any, Dict, Union
import numpy as np
import numpy.typing as npt
@@ -80,31 +80,6 @@ def fov_y(self) -> float:
return fov_y_rad
-def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]:
- """
- Converts a dictionary of CameraMetadata to a JSON-serializable format.
- :param camera_metadata: Dictionary of CameraMetadata.
- :return: JSON-serializable dictionary.
- """
- camera_metadata_dict = {
- camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
- }
- return json.dumps(camera_metadata_dict)
-
-
-def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, CameraMetadata]:
- """
- Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
- :param json_dict: JSON-serializable dictionary.
- :return: Dictionary of CameraMetadata.
- """
- camera_metadata_dict = json.loads(json_dict)
- return {
- CameraType.deserialize(camera_type): CameraMetadata.from_dict(metadata)
- for camera_type, metadata in camera_metadata_dict.items()
- }
-
-#TODO Code Refactoring
@dataclass
class FisheyeMEICameraMetadata:
camera_type: CameraType
@@ -124,6 +99,18 @@ def to_dict(self) -> Dict[str, Any]:
"distortion": self.distortion.tolist() if self.distortion is not None else None,
"projection_parameters": self.projection_parameters.tolist() if self.projection_parameters is not None else None,
}
+
+ @classmethod
+ def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata:
+ # TODO: remove None types. Only a placeholder for now.
+ return cls(
+ camera_type=CameraType(json_dict["camera_type"]),
+ width=json_dict["width"],
+ height=json_dict["height"],
+ mirror_parameters=json_dict["mirror_parameters"],
+ distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None,
+ projection_parameters=np.array(json_dict["projection_parameters"]) if json_dict["projection_parameters"] is not None else None,
+ )
def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
''' camera coordinate to image plane '''
@@ -151,7 +138,34 @@ def cam2image(self, points_3d: npt.NDArray[np.float64]) -> npt.NDArray[np.float6
y = gamma2*y + v0
return x, y, norm * points_3d[:,2] / np.abs(points_3d[:,2])
-
+
+def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]:
+ """
+ Converts a dictionary of CameraMetadata to a JSON-serializable format.
+ :param camera_metadata: Dictionary of CameraMetadata.
+ :return: JSON-serializable dictionary.
+ """
+ camera_metadata_dict = {
+ camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
+ }
+ return json.dumps(camera_metadata_dict)
+
+
+def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]:
+ """
+ Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
+ :param json_dict: JSON-serializable dictionary.
+ :return: Dictionary of CameraMetadata.
+ """
+ camera_metadata_dict = json.loads(json_dict)
+ out: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {}
+ for camera_type, metadata in camera_metadata_dict.items():
+ cam_type = CameraType.deserialize(camera_type)
+ if isinstance(metadata, dict) and "mirror_parameters" in metadata:
+ out[cam_type] = FisheyeMEICameraMetadata.from_dict(metadata)
+ else:
+ out[cam_type] = CameraMetadata.from_dict(metadata)
+ return out
@dataclass
class Camera:
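The cam2image method added for fisheye cameras follows the MEI unified camera model: project through a mirror parameter, apply radial/tangential distortion, then the generalized focal lengths and principal point. A hedged sketch of that pipeline; all parameter values below are illustrative, not KITTI-360 calibration:

```python
# MEI unified-camera projection: mirror parameter xi, radial/tangential
# distortion (k1, k2, p1, p2), projection parameters (gamma1, gamma2, u0, v0).
import numpy as np

def mei_project(points, xi, k1, k2, p1, p2, gamma1, gamma2, u0, v0):
    p = np.asarray(points, dtype=np.float64)  # (N, 3) in the camera frame
    norm = np.linalg.norm(p, axis=1)
    x = p[:, 0] / (p[:, 2] + xi * norm)  # unit sphere -> normalized image plane
    y = p[:, 1] / (p[:, 2] + xi * norm)
    r2 = x * x + y * y
    radial = 1.0 + k1 * r2 + k2 * r2 * r2
    xd = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x)
    yd = y * radial + p1 * (r2 + 2.0 * y * y) + 2.0 * p2 * x * y
    return gamma1 * xd + u0, gamma2 * yd + v0

u, v = mei_project([[0.2, -0.1, 1.5]], xi=1.6, k1=-0.04, k2=0.01,
                   p1=0.0, p2=0.0, gamma1=780.0, gamma2=780.0, u0=700.0, v0=700.0)
print(u, v)
```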
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 1b967fca..93a84c9e 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -43,9 +43,8 @@
KITTI360_CAMERA_TYPES = {
CameraType.CAM_STEREO_L: "image_00",
CameraType.CAM_STEREO_R: "image_01",
- # TODO need code refactoring to support fisheye cameras
- # CameraType.CAM_L1: "image_02",
- # CameraType.CAM_R1: "image_03",
+ CameraType.CAM_L1: "image_02",
+ CameraType.CAM_R1: "image_03",
}
DIR_2D_RAW = "data_2d_raw"
@@ -655,7 +654,6 @@ def _extract_cameras(
with open(img_path_png, "rb") as f:
camera_data = f.read(), cam2pose
else:
- #TODO
camera_data = None, cam2pose.flatten().tolist()
camera_dict[camera_type] = camera_data
return camera_dict
diff --git a/d123/dataset/scene/arrow_scene.py b/d123/dataset/scene/arrow_scene.py
index ecd68111..39d90c9c 100644
--- a/d123/dataset/scene/arrow_scene.py
+++ b/d123/dataset/scene/arrow_scene.py
@@ -6,7 +6,7 @@
from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json
+from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json
from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -70,7 +70,7 @@ def __init__(
) = _get_scene_data(arrow_file_path)
self._metadata: LogMetadata = _metadata
self._vehicle_parameters: VehicleParameters = _vehicle_parameters
- self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata
+ self._camera_metadata: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata
self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
self._map_api: Optional[AbstractMap] = None
From 551bed981ce38b7a56084429743b881070f230b7 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 2 Sep 2025 16:43:14 +0200
Subject: [PATCH 033/145] Move nuplan-specific dependencies to optional extras (#45)
---
pyproject.toml | 35 ++++++++++++++++-------------------
1 file changed, 16 insertions(+), 19 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index dda67ef7..44061249 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,43 +20,28 @@ readme = "README.md"
requires-python = ">=3.9"
license = {text = "Apache-2.0"}
dependencies = [
- "aioboto3",
- "aiofiles",
"bokeh",
- "casadi",
- "control",
- "Fiona",
"geopandas",
- "guppy3",
"joblib",
"matplotlib",
- "nest_asyncio",
"numpy",
"opencv-python",
"pandas",
"Pillow",
"psutil",
"pyarrow",
- "pyinstrument",
"pyogrio",
"pyquaternion",
"pytest",
"rasterio",
"ray",
- "retry",
"rtree",
"scipy",
- "selenium",
"setuptools",
"shapely>=2.0.0",
- "SQLAlchemy==1.4.27",
- "sympy",
- "tornado",
"tqdm",
- "ujson",
"notebook",
"pre-commit",
- "cachetools",
"hydra_colorlog",
"hydra-core",
"lxml",
@@ -84,20 +69,32 @@ docs = [
]
nuplan = [
"nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2",
+ "ujson",
+ "tornado",
+ "sympy",
+ "SQLAlchemy==1.4.27",
+ "selenium",
+ "nest_asyncio",
+ "cachetools",
+ "aioboto3",
+ "aiofiles",
+ "casadi",
+ "control",
+ "pyinstrument",
+ "Fiona",
+ "guppy3",
+ "retry",
]
waymo = [
"tensorflow==2.11.0",
"waymo-open-dataset-tf-2-11-0",
"intervaltree",
]
-av2 = [
- "av2==0.2.1",
-]
[tool.setuptools.packages.find]
where = ["."]
include = ["d123*"] # Only include d123 package
-exclude = ["notebooks*", "tests*", "docs*"] # Explicitly exclude notebooks
+exclude = ["notebooks*", "docs*"] # Explicitly exclude notebooks
[project.urls]
"Homepage" = "https://github.com/DanielDauner/d123"
From 3bcf0caacea61d263fe1be12d40140af21dc3d8a Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 2 Sep 2025 18:33:58 +0200
Subject: [PATCH 034/145] Add import errors for optional dependencies (#45)
---
d123/common/utils/dependencies.py | 18 ++++++
.../dataset_specific/nuplan/load_sensor.py | 12 ++--
.../nuplan/nuplan_data_converter.py | 18 +++---
.../wopd/wopd_data_converter.py | 8 ++-
requirements.txt | 58 -------------------
5 files changed, 39 insertions(+), 75 deletions(-)
create mode 100644 d123/common/utils/dependencies.py
delete mode 100644 requirements.txt
diff --git a/d123/common/utils/dependencies.py b/d123/common/utils/dependencies.py
new file mode 100644
index 00000000..d750061e
--- /dev/null
+++ b/d123/common/utils/dependencies.py
@@ -0,0 +1,18 @@
+from typing import List, Union
+
+
+def check_dependencies(modules: Union[str, List[str]], optional_name: str) -> None:
+ """
+ Checks if the given modules can be imported, otherwise raises an ImportError with a message
+ :param modules: Module name or list of module names to check
+ :param optional_name: Name of the optional feature
+ :raises ImportError: If any of the modules cannot be imported
+ """
+ modules = modules if isinstance(modules, list) else [modules]
+ for module in modules:
+ try:
+ __import__(module)
+ except ImportError:
+ raise ImportError(
+ f"Missing '{module}'. Install with: `pip install d123[{optional_name}]` or `pip install -e .[{optional_name}]`"
+ )
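Usage sketch for check_dependencies, mirroring how the converters below guard their optional imports; the waymo extra is used as the example:

```python
# Guard optional imports at module import time, as the converters below do.
from d123.common.utils.dependencies import check_dependencies

check_dependencies(["tensorflow", "waymo_open_dataset"], optional_name="waymo")
# Only reached if both modules import cleanly; otherwise the user sees a
# pip hint such as `pip install d123[waymo]`.
import tensorflow as tf  # noqa: E402
from waymo_open_dataset import dataset_pb2  # noqa: E402
```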
diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/dataset/dataset_specific/nuplan/load_sensor.py
index 0bbdc406..3e0033b3 100644
--- a/d123/dataset/dataset_specific/nuplan/load_sensor.py
+++ b/d123/dataset/dataset_specific/nuplan/load_sensor.py
@@ -1,10 +1,13 @@
import io
from pathlib import Path
-from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
+from d123.common.utils.dependencies import check_dependencies
from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+check_dependencies(["nuplan"], "nuplan")
+from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
+
def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
assert filepath.exists(), f"LiDAR file not found: {filepath}"
@@ -12,10 +15,3 @@ def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -
buffer = io.BytesIO(fp.read())
return LiDAR(metadata=lidar_metadata, point_cloud=LidarPointCloud.from_buffer(buffer, "pcd").points)
-
-# def load_camera_from_path(filename: str, metadata: CameraMetadata) -> Camera:
-# camera_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / filename
-# assert camera_full_path.exists(), f"Camera file not found: {camera_full_path}"
-# img = Image.open(camera_full_path)
-# img.load()
-# return Camera(metadata=metadata, image=np.asarray(img, dtype=np.uint8))
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index f57a5f5e..b7b52e0b 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -10,14 +10,8 @@
import numpy as np
import pyarrow as pa
import yaml
-from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens
-from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
-from nuplan.database.nuplan_db_orm.lidar_box import LidarBox
-from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
-from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
-from nuplan.planning.simulation.observation.observation_type import CameraChannel
from pyquaternion import Quaternion
-from sqlalchemy import func
+
import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils
from d123.common.datatypes.detection.detection import TrafficLightStatus
@@ -32,6 +26,7 @@
rear_axle_se3_to_center_se3,
)
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.utils.dependencies import check_dependencies
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
@@ -39,6 +34,15 @@
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+check_dependencies(["nuplan", "sqlalchemy"], "nuplan")
+from nuplan.database.nuplan_db.nuplan_scenario_queries import get_cameras, get_images_from_lidar_tokens
+from nuplan.database.nuplan_db_orm.ego_pose import EgoPose
+from nuplan.database.nuplan_db_orm.lidar_box import LidarBox
+from nuplan.database.nuplan_db_orm.lidar_pc import LidarPc
+from nuplan.database.nuplan_db_orm.nuplandb import NuPlanDB
+from nuplan.planning.simulation.observation.observation_type import CameraChannel
+from sqlalchemy import func
+
TARGET_DT: Final[float] = 0.1
NUPLAN_DT: Final[float] = 0.05
SORT_BY_TIMESTAMP: Final[bool] = True
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 812d40c6..3e577a04 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -10,9 +10,8 @@
import numpy as np
import numpy.typing as npt
import pyarrow as pa
-import tensorflow as tf
from pyquaternion import Quaternion
-from waymo_open_dataset import dataset_pb2
+
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
@@ -21,6 +20,7 @@
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.utils.dependencies import check_dependencies
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
@@ -30,6 +30,10 @@
from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
+import tensorflow as tf
+from waymo_open_dataset import dataset_pb2
+
# TODO: Make keep_polar_features an optional argument.
# With polar features, the lidar loading time is SIGNIFICANTLY higher.
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 4c17c209..00000000
--- a/requirements.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-# nuplan-devkit @ git+https://github.com/motional/nuplan-devkit/@nuplan-devkit-v1.2
-
-# # # nuplan requirements
-# aioboto3
-# aiofiles
-# bokeh
-# casadi
-# control
-# Fiona
-# geopandas
-# guppy3
-# joblib
-# matplotlib
-# nest_asyncio
-# numpy
-# opencv-python
-# pandas
-# Pillow
-# psutil
-# pyarrow
-# pyinstrument
-# pyogrio
-# pyquaternion
-# pytest
-# rasterio
-# ray
-# retry
-# rtree
-# scipy
-# selenium
-# setuptools
-# shapely>=2.0.0
-# SQLAlchemy==1.4.27
-# sympy
-# tornado
-# tqdm
-# ujson
-# notebook
-# pre-commit
-# cachetools
-
-# # hydra
-# hydra_colorlog
-# hydra-core
-
-# # d123
-# lxml
-# trimesh
-# viser
-# gym==0.17.2
-# gymnasium==0.26.3
-
-# # torch & lighting
-# torch==2.6.0
-# torchvision==0.21.0
-# lightning
-# tensorboard
-# protobuf==4.25.3
From 4241723b677a3c1908b3d0548411b32b5b18d0fd Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Sun, 14 Sep 2025 16:04:11 +0800
Subject: [PATCH 035/145] refactor camera.py: create base CameraMetadata and
 rename the original class to PinholeCameraMetadata
---
d123/common/datatypes/sensor/camera.py | 38 +++++++++++-------
.../av2/av2_data_converter.py | 8 ++--
.../carla/carla_data_converter.py | 6 +--
.../kitti_360/kitti_360_data_converter.py | 40 +++++--------------
.../kitti_360/kitti_360_helper.py | 32 ++++++++++++++-
.../kitti_360/preprocess_detection.py | 10 ++---
.../nuplan/nuplan_data_converter.py | 10 ++---
.../wopd/wopd_data_converter.py | 8 ++--
d123/dataset/scene/arrow_scene.py | 4 +-
9 files changed, 87 insertions(+), 69 deletions(-)
diff --git a/d123/common/datatypes/sensor/camera.py b/d123/common/datatypes/sensor/camera.py
index e6dc60d6..a9cc209e 100644
--- a/d123/common/datatypes/sensor/camera.py
+++ b/d123/common/datatypes/sensor/camera.py
@@ -3,6 +3,7 @@
import json
from dataclasses import dataclass
from typing import Any, Dict, Union
+from abc import ABC, abstractmethod
import numpy as np
import numpy.typing as npt
@@ -26,13 +27,24 @@ class CameraType(SerialIntEnum):
CAM_STEREO_L = 8
CAM_STEREO_R = 9
-
@dataclass
-class CameraMetadata:
-
+class CameraMetadata(ABC):
camera_type: CameraType
width: int
height: int
+
+ @abstractmethod
+ def to_dict(self) -> Dict[str, Any]:
+ ...
+
+ @classmethod
+ @abstractmethod
+ def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata:
+ ...
+
+@dataclass
+class PinholeCameraMetadata(CameraMetadata):
+
intrinsic: npt.NDArray[np.float64] # 3x3 matrix # TODO: don't store matrix but values.
distortion: npt.NDArray[np.float64] # 5x1 vector # TODO: don't store matrix but values.
@@ -47,7 +59,7 @@ def to_dict(self) -> Dict[str, Any]:
}
@classmethod
- def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata:
+ def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata:
# TODO: remove None types. Only a placeholder for now.
return cls(
camera_type=CameraType(json_dict["camera_type"]),
@@ -81,11 +93,9 @@ def fov_y(self) -> float:
@dataclass
-class FisheyeMEICameraMetadata:
- camera_type: CameraType
- width: int
- height: int
- mirror_parameters: int
+class FisheyeMEICameraMetadata(CameraMetadata):
+
+ mirror_parameters: float
distortion: npt.NDArray[np.float64] # k1,k2,p1,p2
projection_parameters: npt.NDArray[np.float64] #gamma1,gamma2,u0,v0
@@ -101,7 +111,7 @@ def to_dict(self) -> Dict[str, Any]:
}
@classmethod
- def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata:
+ def from_dict(cls, json_dict: Dict[str, Any]) -> FisheyeMEICameraMetadata:
# TODO: remove None types. Only a placeholder for now.
return cls(
camera_type=CameraType(json_dict["camera_type"]),
@@ -151,26 +161,26 @@ def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadat
return json.dumps(camera_metadata_dict)
-def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]:
+def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]:
"""
Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
:param json_dict: JSON-serializable dictionary.
:return: Dictionary of CameraMetadata.
"""
camera_metadata_dict = json.loads(json_dict)
- out: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {}
+ out: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {}
for camera_type, metadata in camera_metadata_dict.items():
cam_type = CameraType.deserialize(camera_type)
if isinstance(metadata, dict) and "mirror_parameters" in metadata:
out[cam_type] = FisheyeMEICameraMetadata.from_dict(metadata)
else:
- out[cam_type] = CameraMetadata.from_dict(metadata)
+ out[cam_type] = PinholeCameraMetadata.from_dict(metadata)
return out
@dataclass
class Camera:
- metadata: CameraMetadata
+ metadata: PinholeCameraMetadata
image: npt.NDArray[np.uint8]
extrinsic: npt.NDArray[np.float64] # 4x4 matrix
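
With this refactor, serialization round-trips dispatch on the presence of "mirror_parameters": camera_metadata_dict_from_json restores a FisheyeMEICameraMetadata when that key exists and a PinholeCameraMetadata otherwise. A round-trip sketch, assuming the dataclass fields are exactly those visible in this hunk (all calibration values are placeholders, and CAM_STEREO_L is used only as an arbitrary slot):

    import numpy as np

    from d123.common.datatypes.sensor.camera import (
        CameraType,
        FisheyeMEICameraMetadata,
        PinholeCameraMetadata,
        camera_metadata_dict_from_json,
        camera_metadata_dict_to_json,
    )

    metadata = {
        CameraType.CAM_F0: PinholeCameraMetadata(
            camera_type=CameraType.CAM_F0,
            width=1408,
            height=376,
            intrinsic=np.eye(3),
            distortion=np.zeros(5),
        ),
        CameraType.CAM_STEREO_L: FisheyeMEICameraMetadata(
            camera_type=CameraType.CAM_STEREO_L,
            width=1400,
            height=1400,
            mirror_parameters=1.0,
            distortion=np.zeros(4),             # k1, k2, p1, p2
            projection_parameters=np.zeros(4),  # gamma1, gamma2, u0, v0
        ),
    }

    # The "mirror_parameters" key routes the fisheye entry back to the right class.
    restored = camera_metadata_dict_from_json(camera_metadata_dict_to_json(metadata))
    assert isinstance(restored[CameraType.CAM_F0], PinholeCameraMetadata)
    assert isinstance(restored[CameraType.CAM_STEREO_L], FisheyeMEICameraMetadata)
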
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index f5e5e44a..d1dace89 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -11,7 +11,7 @@
import pyarrow as pa
from pyquaternion import Quaternion
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
@@ -234,17 +234,17 @@ def convert_av2_log_to_arrow(
return []
-def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]:
+def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]:
intrinsics_file = log_path / "calibration" / "intrinsics.feather"
intrinsics_df = pd.read_feather(intrinsics_file)
- camera_metadata: Dict[CameraType, CameraMetadata] = {}
+ camera_metadata: Dict[CameraType, PinholeCameraMetadata] = {}
for _, row in intrinsics_df.iterrows():
row = row.to_dict()
camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]]
- camera_metadata[camera_type] = CameraMetadata(
+ camera_metadata[camera_type] = PinholeCameraMetadata(
camera_type=camera_type,
width=row["width_px"],
height=row["height_px"],
diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/dataset/dataset_specific/carla/carla_data_converter.py
index c6ce3622..5dede534 100644
--- a/d123/dataset/dataset_specific/carla/carla_data_converter.py
+++ b/d123/dataset/dataset_specific/carla/carla_data_converter.py
@@ -11,7 +11,7 @@
import numpy as np
import pyarrow as pa
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
@@ -247,7 +247,7 @@ def _get_metadata(location: str, log_name: str) -> LogMetadata:
)
-def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, CameraMetadata]:
+def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, PinholeCameraMetadata]:
# FIXME: This is a placeholder function to return camera metadata.
@@ -256,7 +256,7 @@ def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType
dtype=np.float64,
)
camera_metadata = {
- CameraType.CAM_F0: CameraMetadata(
+ CameraType.CAM_F0: PinholeCameraMetadata(
camera_type=CameraType.CAM_F0,
width=1024,
height=512,
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 93a84c9e..aee14883 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -22,7 +22,7 @@
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex
from d123.common.datatypes.time.time_point import TimePoint
@@ -31,7 +31,7 @@
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
@@ -55,8 +55,8 @@
DIR_POSES = "data_poses"
DIR_CALIB = "calibration"
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
-# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
# PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw")
@@ -244,7 +244,7 @@ def convert_kitti360_log_to_arrow(
return []
-def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]]:
+def get_kitti360_camera_metadata() -> Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]]:
persp = PATH_CALIB_ROOT / "perspective.txt"
@@ -270,10 +270,10 @@ def get_kitti360_camera_metadata() -> Dict[CameraType, Union[CameraMetadata, Fis
fisheye03 = _readYAMLFile(fisheye_camera03_path)
fisheye_result = {"image_02": fisheye02, "image_03": fisheye03}
- log_cam_infos: Dict[str, Union[CameraMetadata, FisheyeMEICameraMetadata]] = {}
+ log_cam_infos: Dict[str, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = {}
for cam_type, cam_name in KITTI360_CAMERA_TYPES.items():
if cam_name in ["image_00", "image_01"]:
- log_cam_infos[cam_type] = CameraMetadata(
+ log_cam_infos[cam_type] = PinholeCameraMetadata(
camera_type=cam_type,
width=persp_result[cam_name]["wh"][0],
height=persp_result[cam_name]["wh"][1],
@@ -324,28 +324,6 @@ def get_kitti360_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
)
return metadata
-def get_lidar_extrinsic() -> np.ndarray:
- cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
- if not cam2pose_txt.exists():
- raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
-
- cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt"
- if not cam2velo_txt.exists():
- raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}")
-
- lastrow = np.array([0,0,0,1]).reshape(1,4)
-
- with open(cam2pose_txt, 'r') as f:
- image_00 = next(f)
- values = list(map(float, image_00.strip().split()[1:]))
- matrix = np.array(values).reshape(3, 4)
- cam2pose = np.concatenate((matrix, lastrow))
- cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
-
- cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3,4), lastrow))
- extrinsic = cam2pose @ np.linalg.inv(cam2velo)
- return extrinsic
-
def _write_recording_table(
log_name: str,
recording_schema: pa.Schema,
@@ -451,8 +429,8 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
poses = np.loadtxt(pose_file)
poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
- oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
- # oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
+ # oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
+ oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
pose_idx = 0
poses_time_len = len(poses_time)
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index 76e3c9e0..77217b5d 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -8,6 +8,13 @@
from d123.geometry import BoundingBoxSE3, StateSE3
from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
+import os
+from pathlib import Path
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+DIR_CALIB = "calibration"
+PATH_CALIB_ROOT: Path = KITTI360_DATA_ROOT / DIR_CALIB
+
DEFAULT_ROLL = 0.0
DEFAULT_PITCH = 0.0
@@ -162,4 +169,27 @@ def box_visible_in_point_cloud(self, points):
def load_detection_preprocess(self, records_dict: Dict[int, Any]):
if self.globalID in records_dict:
- self.valid_frames["records"] = records_dict[self.globalID]["records"]
\ No newline at end of file
+ self.valid_frames["records"] = records_dict[self.globalID]["records"]
+
+
+def get_lidar_extrinsic() -> np.ndarray:
+ cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
+ if not cam2pose_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_pose.txt file not found: {cam2pose_txt}")
+
+ cam2velo_txt = PATH_CALIB_ROOT / "calib_cam_to_velo.txt"
+ if not cam2velo_txt.exists():
+ raise FileNotFoundError(f"calib_cam_to_velo.txt file not found: {cam2velo_txt}")
+
+ lastrow = np.array([0, 0, 0, 1]).reshape(1, 4)
+
+ with open(cam2pose_txt, 'r') as f:
+ image_00 = next(f)
+ values = list(map(float, image_00.strip().split()[1:]))
+ matrix = np.array(values).reshape(3, 4)
+ cam2pose = np.concatenate((matrix, lastrow))
+ cam2pose = KITTI3602NUPLAN_IMU_CALIBRATION @ cam2pose
+
+ cam2velo = np.concatenate((np.loadtxt(cam2velo_txt).reshape(3, 4), lastrow))
+ extrinsic = cam2pose @ np.linalg.inv(cam2velo)
+ return extrinsic
\ No newline at end of file
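
For context, get_lidar_extrinsic chains the two KITTI-360 calibration files: calib_cam_to_pose.txt gives camera 0 in the IMU/pose frame (T_pose<-cam0) and calib_cam_to_velo.txt gives camera 0 in the Velodyne frame (T_velo<-cam0), so cam2pose @ inv(cam2velo) is the Velodyne-to-pose transform T_pose<-velo, with the KITTI-360-to-nuPlan axis correction folded into cam2pose. A self-contained check of that chaining with made-up calibration matrices:

    import numpy as np

    def homogeneous(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
        """Assemble a 4x4 homogeneous transform from R (3x3) and t (3,)."""
        T = np.eye(4)
        T[:3, :3] = rotation
        T[:3, 3] = translation
        return T

    # Made-up extrinsics: camera 0 in the pose frame and in the Velodyne frame.
    cam2pose = homogeneous(np.eye(3), np.array([1.0, 0.0, 1.5]))   # T_pose<-cam0
    cam2velo = homogeneous(np.eye(3), np.array([0.0, -0.5, 0.0]))  # T_velo<-cam0

    # Chaining as in get_lidar_extrinsic: T_pose<-velo = T_pose<-cam0 @ (T_velo<-cam0)^-1.
    velo2pose = cam2pose @ np.linalg.inv(cam2velo)

    # A point at the Velodyne origin lands at the expected spot in the pose frame.
    point = velo2pose @ np.array([0.0, 0.0, 0.0, 1.0])
    np.testing.assert_allclose(point[:3], [1.0, 0.5, 1.5])
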
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
index 8b7c284f..5827e779 100644
--- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -25,13 +25,13 @@
DIR_3D_BBOX = "data_3d_bboxes"
DIR_POSES = "data_poses"
-PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
+# PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
+PATH_3D_RAW_ROOT = Path("/data/jbwang/d123/data_3d_raw/")
PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label
-from d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter import get_lidar_extrinsic
def _bbox_xml_path(log_name: str) -> Path:
return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
@@ -178,9 +178,9 @@ def process_detection(
import argparse
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detections filters")
- parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync")
+ parser.add_argument("--log_name", default="2013_05_28_drive_0007_sync")
parser.add_argument("--radius", type=float, default=60.0)
- parser.add_argument("--out", type=Path, default=None, help="output directory for pkl")
+ parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl")
args = parser.parse_args()
process_detection(
log_name=args.log_name,
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index b7b52e0b..47e4bb02 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -16,7 +16,7 @@
import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils
from d123.common.datatypes.detection.detection import TrafficLightStatus
from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex
from d123.common.datatypes.time.time_point import TimePoint
@@ -256,15 +256,15 @@ def convert_nuplan_log_to_arrow(
return []
-def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]:
+def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, PinholeCameraMetadata]:
- def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata:
+ def _get_camera_metadata(camera_type: CameraType) -> PinholeCameraMetadata:
cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0]
intrinsic = np.array(pickle.loads(cam.intrinsic))
rotation = np.array(pickle.loads(cam.rotation))
rotation = Quaternion(rotation).rotation_matrix
distortion = np.array(pickle.loads(cam.distortion))
- return CameraMetadata(
+ return PinholeCameraMetadata(
camera_type=camera_type,
width=cam.width,
height=cam.height,
@@ -272,7 +272,7 @@ def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata:
distortion=distortion,
)
- log_cam_infos: Dict[str, CameraMetadata] = {}
+ log_cam_infos: Dict[str, PinholeCameraMetadata] = {}
for camera_type in NUPLAN_CAMERA_TYPES.keys():
log_cam_infos[camera_type] = _get_camera_metadata(camera_type)
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 3e577a04..2a0c6425 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -14,7 +14,7 @@
from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
@@ -275,9 +275,9 @@ def convert_wopd_tfrecord_log_to_arrow(
def get_wopd_camera_metadata(
initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig
-) -> Dict[CameraType, CameraMetadata]:
+) -> Dict[CameraType, PinholeCameraMetadata]:
- cam_metadatas: Dict[CameraType, CameraMetadata] = {}
+ cam_metadatas: Dict[CameraType, PinholeCameraMetadata] = {}
if data_converter_config.camera_store_option is not None:
for calibration in initial_frame.context.camera_calibrations:
camera_type = WOPD_CAMERA_TYPES[calibration.name]
@@ -289,7 +289,7 @@ def get_wopd_camera_metadata(
_distortions = np.array([k1, k2, p1, p2, k3])
if camera_type in WOPD_CAMERA_TYPES.values():
- cam_metadatas[camera_type] = CameraMetadata(
+ cam_metadatas[camera_type] = PinholeCameraMetadata(
camera_type=camera_type,
width=calibration.width,
height=calibration.height,
diff --git a/d123/dataset/scene/arrow_scene.py b/d123/dataset/scene/arrow_scene.py
index 39d90c9c..6670f138 100644
--- a/d123/dataset/scene/arrow_scene.py
+++ b/d123/dataset/scene/arrow_scene.py
@@ -6,7 +6,7 @@
from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json
+from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json
from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -70,7 +70,7 @@ def __init__(
) = _get_scene_data(arrow_file_path)
self._metadata: LogMetadata = _metadata
self._vehicle_parameters: VehicleParameters = _vehicle_parameters
- self._camera_metadata: Dict[CameraType, Union[CameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata
+ self._camera_metadata: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata
self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
self._map_api: Optional[AbstractMap] = None
From c4bab5f9210b91def0e5b2c3cf8677fa01269747 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sun, 21 Sep 2025 21:25:03 +0200
Subject: [PATCH 036/145] Update transform functions for quaternion se3 case.
#43 #44
---
d123/geometry/geometry_index.py | 16 +
d123/geometry/test/test_transform.py | 105 ++++-
.../test/test_transform_quaternion.py | 193 ++++++++
.../transform/transform_quaternion_se3.py | 444 +++++++++---------
d123/geometry/transform/transform_se3.py | 23 +-
d123/geometry/utils/rotation_utils.py | 107 ++++-
6 files changed, 616 insertions(+), 272 deletions(-)
create mode 100644 d123/geometry/test/test_transform_quaternion.py
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index 5f56abd9..93ff4f3a 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -95,6 +95,14 @@ class QuaternionIndex(IntEnum):
QY = 2
QZ = 3
+ @classproperty
+ def SCALAR(cls) -> int:
+ return cls.QW
+
+ @classproperty
+ def VECTOR(cls) -> slice:
+ return slice(cls.QX, cls.QZ + 1)
+
class StateSE3Index(IntEnum):
"""
@@ -147,6 +155,14 @@ def XYZ(cls) -> slice:
def QUATERNION(cls) -> slice:
return slice(cls.QW, cls.QZ + 1)
+ @classproperty
+ def SCALAR(cls) -> int:
+ return cls.QW
+
+ @classproperty
+ def VECTOR(cls) -> slice:
+ return slice(cls.QX, cls.QZ + 1)
+
class BoundingBoxSE2Index(IntEnum):
"""
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
index 89161153..8c033803 100644
--- a/d123/geometry/test/test_transform.py
+++ b/d123/geometry/test/test_transform.py
@@ -18,6 +18,7 @@
from d123.geometry.transform.transform_se3 import (
convert_absolute_to_relative_points_3d_array,
convert_absolute_to_relative_se3_array,
convert_relative_to_absolute_points_3d_array,
convert_relative_to_absolute_se3_array,
translate_se3_along_body_frame,
@@ -537,7 +538,7 @@ class TestTransformConsistency(unittest.TestCase):
"""Tests to ensure consistency between different transformation functions."""
def setUp(self):
- self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+ self.decimal = 4 # Decimal places for np.testing.assert_array_almost_equal
self.num_consistency_tests = 10 # Number of random test cases for consistency checks
self.max_pose_xyz = 100.0
@@ -554,9 +555,10 @@ def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
"""Generate a random SE3 poses"""
random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
- random_se3_array[:, StateSE3Index.EULER_ANGLES] = np.random.uniform(
- -np.pi, np.pi, (size, len(EulerAnglesIndex))
- )
+ random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+ random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+ random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
return random_se3_array
def test_se2_absolute_relative_conversion_consistency(self) -> None:
@@ -790,6 +792,59 @@ def test_se2_se3_point_conversion_consistency(self) -> None:
absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
)
+ def test_se2_se3_pose_conversion_consistency(self) -> None:
+ """Test that SE2 and SE3 pose conversions are consistent for 2D points embedded in 3D"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 reference poses
+ x = np.random.uniform(-10.0, 10.0)
+ y = np.random.uniform(-10.0, 10.0)
+ yaw = np.random.uniform(-np.pi, np.pi)
+
+ reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+ reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+ # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch
+ num_poses = np.random.randint(1, 8)
+ pose_2d = self._get_random_se2_array(num_poses)
+ pose_3d = np.zeros((num_poses, len(StateSE3Index)), dtype=np.float64)
+ pose_3d[:, StateSE3Index.XY] = pose_2d[:, StateSE2Index.XY]
+ pose_3d[:, StateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW]
+
+ # Convert using SE2 functions
+ relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d)
+ absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+ # Convert using SE3 functions
+ relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, pose_3d)
+ absolute_se3_recovered = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
+
+ # Check that SE2 and SE3 conversions are consistent for the x,y components
+ np.testing.assert_array_almost_equal(
+ relative_se2[:, StateSE2Index.XY], relative_se3[:, StateSE3Index.XY], decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se2_recovered[:, StateSE2Index.XY],
+ absolute_se3_recovered[:, StateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ # Check that SE2 and SE3 conversions are consistent for the yaw component
+ np.testing.assert_array_almost_equal(
+ relative_se2[:, StateSE2Index.YAW], relative_se3[:, StateSE3Index.YAW], decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se2_recovered[:, StateSE2Index.YAW],
+ absolute_se3_recovered[:, StateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Check that z-components remain zero
+ np.testing.assert_array_almost_equal(
+ relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+ )
+
def test_se2_array_translation_consistency(self) -> None:
"""Test that SE2 array translation is consistent with single pose translation"""
for _ in range(self.num_consistency_tests):
@@ -814,30 +869,30 @@ def test_se2_array_translation_consistency(self) -> None:
np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
- def test_transform_empty_arrays(self) -> None:
- """Test that transform functions handle empty arrays correctly"""
- reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
- reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
+ # def test_transform_empty_arrays(self) -> None:
+ # """Test that transform functions handle empty arrays correctly"""
+ # reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
+ # reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
- # Test SE2 empty arrays
- empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
- empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
+ # # Test SE2 empty arrays
+ # empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
+ # empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
- result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
- result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
+ # result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
+ # result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
- self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
- self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
+ # self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
+ # self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
- # Test SE3 empty arrays
- empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
- empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
+ # # Test SE3 empty arrays
+ # empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
+ # empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
- result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
- result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
+ # result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
+ # result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
- self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
- self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
+ # self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
+ # self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
def test_transform_identity_operations(self) -> None:
"""Test that transforms with identity reference frames work correctly"""
@@ -864,7 +919,11 @@ def test_transform_identity_operations(self) -> None:
relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses)
relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
- np.testing.assert_array_almost_equal(se3_poses, relative_se3_poses, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(
+ se3_poses[..., StateSE3Index.EULER_ANGLES],
+ relative_se3_poses[..., StateSE3Index.EULER_ANGLES],
+ decimal=self.decimal,
+ )
np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
def test_transform_large_rotations(self) -> None:
diff --git a/d123/geometry/test/test_transform_quaternion.py b/d123/geometry/test/test_transform_quaternion.py
new file mode 100644
index 00000000..12c8433e
--- /dev/null
+++ b/d123/geometry/test/test_transform_quaternion.py
@@ -0,0 +1,193 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.geometry_index import (
+ # EulerAnglesIndex,
+ # Point2DIndex,
+ # Point3DIndex,
+ QuaternionSE3Index,
+ # StateSE2Index,
+ StateSE3Index,
+)
+from d123.geometry.point import Point3D
+from d123.geometry.rotation import EulerAngles, Quaternion
+from d123.geometry.se import StateSE3, QuaternionSE3
+from d123.geometry.transform.transform_quaternion_se3 import (
+ convert_absolute_to_relative_points_3d_array,
+ convert_absolute_to_relative_se3_array,
+ convert_relative_to_absolute_points_3d_array,
+ convert_relative_to_absolute_se3_array,
+ # translate_se3_along_body_frame,
+ # translate_se3_along_x,
+ # translate_se3_along_y,
+ # translate_se3_along_z,
+)
+import d123.geometry.transform.transform_se3 as euler_transform_se3
+from d123.geometry.utils.rotation_utils import (
+ get_rotation_matrices_from_euler_array,
+ get_rotation_matrices_from_quaternion_array,
+)
+
+# from d123.geometry.vector import Vector2D, Vector3D
+
+
+class TestTransformQuaternion(unittest.TestCase):
+
+ def setUp(self):
+ euler_se3_a = StateSE3(
+ x=1.0,
+ y=2.0,
+ z=3.0,
+ roll=np.deg2rad(90),
+ pitch=0.0,
+ yaw=0.0,
+ )
+ euler_se3_b = StateSE3(
+ x=1.0,
+ y=-2.0,
+ z=3.0,
+ roll=0.0,
+ pitch=np.deg2rad(90),
+ yaw=0.0,
+ )
+ euler_se3_c = StateSE3(
+ x=-1.0,
+ y=2.0,
+ z=-3.0,
+ roll=0.0,
+ pitch=0.0,
+ yaw=np.deg2rad(90),
+ )
+
+ quat_se3_a = euler_se3_a.quaternion_se3
+ quat_se3_b = euler_se3_b.quaternion_se3
+ quat_se3_c = euler_se3_c.quaternion_se3
+
+ self.euler_se3 = [euler_se3_a, euler_se3_b, euler_se3_c]
+ self.quat_se3 = [quat_se3_a, quat_se3_b, quat_se3_c]
+
+ self.max_pose_xyz = 100.0
+
+ def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate a random SE3 poses"""
+ random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
+ random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
+ random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+ random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+ random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
+ return random_se3_array
+
+ def _convert_euler_se3_array_to_quat_se3_array(
+ self, euler_se3_array: npt.NDArray[np.float64]
+ ) -> npt.NDArray[np.float64]:
+ """Convert an array of SE3 poses from Euler angles to Quaternion representation"""
+ quat_se3_array = np.zeros((euler_se3_array.shape[0], len(QuaternionSE3Index)), dtype=np.float64)
+ quat_se3_array[:, QuaternionSE3Index.XYZ] = euler_se3_array[:, StateSE3Index.XYZ]
+ rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, StateSE3Index.EULER_ANGLES])
+ for idx, rotation_matrix in enumerate(rotation_matrices):
+ quat = Quaternion.from_rotation_matrix(rotation_matrix)
+ quat_se3_array[idx, QuaternionSE3Index.QUATERNION] = quat.array
+ return quat_se3_array
+
+ def test_sanity(self):
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ np.testing.assert_allclose(
+ quat_se3.point_3d.array,
+ euler_se3.point_3d.array,
+ atol=1e-6,
+ )
+ np.testing.assert_allclose(
+ quat_se3.rotation_matrix,
+ euler_se3.rotation_matrix,
+ atol=1e-6,
+ )
+
+ def test_random_sanity(self):
+ for _ in range(10):
+ random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10))
+ random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
+
+ np.testing.assert_allclose(
+ random_euler_se3_array[:, StateSE3Index.XYZ],
+ random_quat_se3_array[:, QuaternionSE3Index.XYZ],
+ atol=1e-6,
+ )
+ quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
+ random_quat_se3_array[:, QuaternionSE3Index.QUATERNION]
+ )
+ euler_rotation_matrices = get_rotation_matrices_from_euler_array(
+ random_euler_se3_array[:, StateSE3Index.EULER_ANGLES]
+ )
+ np.testing.assert_allclose(euler_rotation_matrices, quat_rotation_matrices, atol=1e-6)
+
+ def test_convert_absolute_to_relative_points_3d_array(self):
+
+ random_points_3d = np.random.rand(10, 3)
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ rel_points_quat = convert_absolute_to_relative_points_3d_array(quat_se3, random_points_3d)
+ rel_points_euler = euler_transform_se3.convert_absolute_to_relative_points_3d_array(
+ euler_se3, random_points_3d
+ )
+ np.testing.assert_allclose(rel_points_quat, rel_points_euler, atol=1e-6)
+
+ def test_convert_absolute_to_relative_se3_array(self):
+
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10))
+ random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
+
+ rel_se3_quat = convert_absolute_to_relative_se3_array(quat_se3, random_quat_se3_array)
+ rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_se3_array(
+ euler_se3, random_euler_se3_array
+ )
+ np.testing.assert_allclose(
+ rel_se3_euler[..., StateSE3Index.XYZ], rel_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6
+ )
+ # We compare rotation matrices to avoid issues with quaternion sign ambiguity
+ quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
+ rel_se3_quat[..., QuaternionSE3Index.QUATERNION]
+ )
+ euler_rotation_matrices = get_rotation_matrices_from_euler_array(
+ rel_se3_euler[..., StateSE3Index.EULER_ANGLES]
+ )
+ np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6)
+
+ def test_convert_relative_to_absolute_points_3d_array(self):
+
+ random_points_3d = np.random.rand(10, 3)
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ abs_points_quat = convert_relative_to_absolute_points_3d_array(quat_se3, random_points_3d)
+ abs_points_euler = euler_transform_se3.convert_relative_to_absolute_points_3d_array(
+ euler_se3, random_points_3d
+ )
+ np.testing.assert_allclose(abs_points_quat, abs_points_euler, atol=1e-6)
+
+ def test_convert_relative_to_absolute_se3_array(self):
+
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ random_euler_se3_array = self._get_random_euler_se3_array(np.random.randint(1, 10))
+ random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
+
+ abs_se3_quat = convert_relative_to_absolute_se3_array(quat_se3, random_quat_se3_array)
+ abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_se3_array(
+ euler_se3, random_euler_se3_array
+ )
+ np.testing.assert_allclose(
+ abs_se3_euler[..., StateSE3Index.XYZ], abs_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6
+ )
+ # We compare rotation matrices to avoid issues with quaternion sign ambiguity
+ quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
+ abs_se3_quat[..., QuaternionSE3Index.QUATERNION]
+ )
+ euler_rotation_matrices = get_rotation_matrices_from_euler_array(
+ abs_se3_euler[..., StateSE3Index.EULER_ANGLES]
+ )
+ np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6)
+
+
+if __name__ == "__main__":
+ unittest.main()
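
The rotation-matrix comparisons in these tests sidestep quaternion sign ambiguity: q and -q encode the same rotation, so element-wise quaternion checks can fail even when the transforms agree. A short demonstration using pyquaternion (also imported by rotation_utils below):

    import numpy as np
    from pyquaternion import Quaternion

    q = Quaternion(axis=[0.0, 0.0, 1.0], angle=np.pi / 2)
    q_neg = Quaternion(-q.elements)

    # Different components, identical rotation matrices.
    assert not np.allclose(q.elements, q_neg.elements)
    np.testing.assert_allclose(q.rotation_matrix, q_neg.rotation_matrix, atol=1e-12)
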
diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py
index 9a74b601..32a6858b 100644
--- a/d123/geometry/transform/transform_quaternion_se3.py
+++ b/d123/geometry/transform/transform_quaternion_se3.py
@@ -1,237 +1,213 @@
-# TODO: Properly implement and test these functions
+from typing import Union
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry import Vector3D
+from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
+from d123.geometry.se import QuaternionSE3
+from d123.geometry.utils.rotation_utils import (
+ conjugate_quaternion_array,
+ get_rotation_matrix_from_quaternion_array,
+ multiply_quaternion_arrays,
+)
+
+
+def convert_absolute_to_relative_points_3d_array(
+ origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the absolute frame to the relative frame.
+
+ :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the absolute frame.
+ :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+ :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
+
+ if isinstance(origin, QuaternionSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+ t_origin = origin[QuaternionSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.ndim >= 1
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ # Translate points to origin frame, then rotate to body frame
+ relative_points = (points_3d_array - t_origin) @ R_origin
+ return relative_points
+
+
+def convert_absolute_to_relative_se3_array(
+ origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts an SE3 array from the absolute frame to the relative frame.
+
+ :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+ :param se3_array: The SE3 array in the absolute frame.
+ :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+ :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.QuaternionSE3Index`.
+ """
+ if isinstance(origin, QuaternionSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+ origin_array = origin
+ t_origin = origin_array[QuaternionSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.ndim >= 1
+ assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+ abs_positions = se3_array[..., QuaternionSE3Index.XYZ]
+ abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+
+ rel_se3_array = np.zeros_like(se3_array)
+
+ # 1. Vectorized relative position calculation: translate and rotate
+ rel_positions = (abs_positions - t_origin) @ R_origin
+ rel_se3_array[..., QuaternionSE3Index.XYZ] = rel_positions
+
+ # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate
+ q_origin_conj = conjugate_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
+ rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions)
+
+ rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
+
+ return rel_se3_array
+
+
+def convert_relative_to_absolute_points_3d_array(
+ origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the relative frame to the absolute frame.
+
+ :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+ :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
+ if isinstance(origin, QuaternionSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+ t_origin = origin[QuaternionSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
+ else:
+ raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ absolute_points = points_3d_array @ R_origin.T + t_origin
+ return absolute_points
+
+
+def convert_relative_to_absolute_se3_array(
+ origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts an SE3 array from the relative frame to the absolute frame.
+
+ :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray.
+ :param se3_array: The SE3 array in the relative frame.
+ :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
+ :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.QuaternionSE3Index`.
+ """
+
+ if isinstance(origin, QuaternionSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
+ origin_array = origin
+ t_origin = origin_array[QuaternionSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
+ else:
+ raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.ndim >= 1
+ assert se3_array.shape[-1] == len(QuaternionSE3Index)
+
+ # Extract relative positions and orientations
+ rel_positions = se3_array[..., QuaternionSE3Index.XYZ]
+ rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
+
+ # Vectorized absolute position calculation: rotate and translate
+ abs_positions = (R_origin @ rel_positions.T).T + t_origin
+ abs_quaternions = multiply_quaternion_arrays(origin_array[QuaternionSE3Index.QUATERNION], rel_quaternions)
+
+ # Prepare output array
+ abs_se3_array = se3_array.copy()
+ abs_se3_array[..., QuaternionSE3Index.XYZ] = abs_positions
+ abs_se3_array[..., QuaternionSE3Index.QUATERNION] = abs_quaternions
+
+ return abs_se3_array
+
+
+def translate_se3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+ """Translates a QuaternionSE3 state along the Z-axis.
+
+ :param state_se3: The QuaternionSE3 state to translate.
+ :param distance: The distance to translate along the Z-axis.
+ :return: The translated QuaternionSE3 state.
+ """
+ R = state_se3.rotation_matrix
+ z_axis = R[:, 2]
-# from typing import Union
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+ return QuaternionSE3.from_array(state_se3_array, copy=False)
-# import numpy as np
-# import numpy.typing as npt
-# from d123.geometry import Vector3D
-# from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
-# from d123.geometry.se import QuaternionSE3
-
-
-# def translate_qse3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
-# """Translates a QuaternionSE3 state along the Z-axis.
-
-# :param state_se3: The QuaternionSE3 state to translate.
-# :param distance: The distance to translate along the Z-axis.
-# :return: The translated QuaternionSE3 state.
-# """
-# R = state_se3.rotation_matrix
-# z_axis = R[:, 2]
-
-# state_se3_array = state_se3.array.copy()
-# state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
-# return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-# def translate_qse3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
-# """Translates a QuaternionSE3 state along the Y-axis.
-
-# :param state_se3: The QuaternionSE3 state to translate.
-# :param distance: The distance to translate along the Y-axis.
-# :return: The translated QuaternionSE3 state.
-# """
-# R = state_se3.rotation_matrix
-# y_axis = R[:, 1]
-
-# state_se3_array = state_se3.array.copy()
-# state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
-# return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-# def translate_qse3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
-# """Translates a QuaternionSE3 state along the X-axis.
-
-# :param state_se3: The QuaternionSE3 state to translate.
-# :param distance: The distance to translate along the X-axis.
-# :return: The translated QuaternionSE3 state.
-# """
-# R = state_se3.rotation_matrix
-# x_axis = R[:, 0]
-
-# state_se3_array = state_se3.array.copy()
-# state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
-# return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-# def translate_qse3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3:
-# """Translates a QuaternionSE3 state along a vector in the body frame.
-
-# :param state_se3: The QuaternionSE3 state to translate.
-# :param vector_3d: The vector to translate along in the body frame.
-# :return: The translated QuaternionSE3 state.
-# """
-# R = state_se3.rotation_matrix
-# world_translation = R @ vector_3d.array
-
-# state_se3_array = state_se3.array.copy()
-# state_se3_array[QuaternionSE3Index.XYZ] += world_translation
-# return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-# def convert_absolute_to_relative_qse3_array(
-# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
-# ) -> npt.NDArray[np.float64]:
-# """Converts a QuaternionSE3 array from the absolute frame to the relative frame.
-
-# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
-# :param se3_array: The QuaternionSE3 array in the absolute frame [N, 7].
-# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
-# :return: The QuaternionSE3 array in the relative frame [N, 7].
-# """
-# if isinstance(origin, QuaternionSE3):
-# origin_ = origin
-# elif isinstance(origin, np.ndarray):
-# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
-# origin_ = QuaternionSE3.from_array(origin)
-# else:
-# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
-# assert se3_array.ndim >= 1
-# assert se3_array.shape[-1] == len(QuaternionSE3Index)
-
-# t_origin = origin_.point_3d.array
-# R_origin = origin_.rotation_matrix
-
-# # Extract absolute positions and quaternions
-# abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
-# q_origin = origin_.quaternion
-
-# # Compute relative quaternions: q_rel = q_origin^-1 * q_abs
-# if abs_quaternions.ndim == 1:
-# rel_quaternions = _quaternion_multiply(_quaternion_multiply(q_origin), abs_quaternions)
-# else:
-# rel_quaternions = np.array([_quaternion_multiply(_quaternion_multiply(q_origin), q) for q in abs_quaternions])
-
-
-# # Prepare output array
-# rel_se3_array = np.zeros_like(se3_array)
-# rel_se3_array[..., QuaternionSE3Index.XYZ] = (se3_array[..., QuaternionSE3Index.XYZ] - t_origin) @ R_origin
-# rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
-
-# return rel_se3_array
-
-
-# def convert_relative_to_absolute_qse3_array(
-# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
-# ) -> npt.NDArray[np.float64]:
-# """Converts a QuaternionSE3 array from the relative frame to the absolute frame.
-
-# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
-# :param se3_array: The QuaternionSE3 array in the relative frame [N, 7].
-# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
-# :return: The QuaternionSE3 array in the absolute frame [N, 7].
-# """
-# if isinstance(origin, QuaternionSE3):
-# t_origin = origin.translation
-# q_origin = origin.quaternion
-# R_origin = origin.rotation_matrix
-# elif isinstance(origin, np.ndarray):
-# assert origin.ndim == 1 and origin.shape[-1] == 7
-# t_origin = origin[:3]
-# q_origin = origin[3:]
-# origin_quat_se3 = QuaternionSE3.from_array(origin)
-# R_origin = origin_quat_se3.rotation_matrix
-# else:
-# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
-# assert se3_array.ndim >= 1
-# assert se3_array.shape[-1] == len(QuaternionSE3Index)
-
-# # Extract relative positions and quaternions
-# rel_positions = se3_array[..., QuaternionSE3Index.XYZ]
-# rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
-
-# # Compute absolute positions: R_origin @ p_rel + t_origin
-# abs_positions = (R_origin @ rel_positions.T).T + t_origin
-
-# # Compute absolute quaternions: q_abs = q_origin * q_rel
-# if rel_quaternions.ndim == 1:
-# abs_quaternions = _quaternion_multiply(q_origin, rel_quaternions)
-# else:
-# abs_quaternions = np.array([_quaternion_multiply(q_origin, q) for q in rel_quaternions])
-
-# # Prepare output array
-# abs_se3_array = se3_array.copy()
-# abs_se3_array[..., :3] = abs_positions
-# abs_se3_array[..., 3:] = abs_quaternions
-
-# return abs_se3_array
-
-
-# def convert_absolute_to_relative_points_q3d_array(
-# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-# ) -> npt.NDArray[np.float64]:
-# """Converts 3D points from the absolute frame to the relative frame.
-
-# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
-# :param points_3d_array: The 3D points in the absolute frame [N, 3].
-# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
-# :return: The 3D points in the relative frame [N, 3].
-# """
-# if isinstance(origin, QuaternionSE3):
-# t_origin = origin.point_3d.array
-# R_origin_inv = origin.rotation_matrix.T
-# elif isinstance(origin, np.ndarray):
-# assert origin.ndim == 1 and origin.shape[-1] == 7
-# t_origin = origin[:3]
-# origin_quat_se3 = QuaternionSE3.from_array(origin)
-# R_origin_inv = origin_quat_se3.rotation_matrix.T
-# else:
-# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
-# assert points_3d_array.ndim >= 1
-# assert points_3d_array.shape[-1] == len(Point3DIndex)
-
-# # Transform points: R_origin^T @ (p_abs - t_origin)
-# relative_points = (points_3d_array - t_origin) @ R_origin_inv.T
-# return relative_points
-
-
-# def convert_relative_to_absolute_points_q3d_array(
-# origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-# ) -> npt.NDArray[np.float64]:
-# """Converts 3D points from the relative frame to the absolute frame.
-
-# :param origin: The origin state in the absolute frame, as a QuaternionSE3 or np.ndarray [x,y,z,qw,qx,qy,qz].
-# :param points_3d_array: The 3D points in the relative frame [N, 3].
-# :raises TypeError: If the origin is not a QuaternionSE3 or np.ndarray.
-# :return: The 3D points in the absolute frame [N, 3].
-# """
-# if isinstance(origin, QuaternionSE3):
-# t_origin = origin.point_3d.array
-# R_origin = origin.rotation_matrix
-# elif isinstance(origin, np.ndarray):
-# assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
-# t_origin = origin[QuaternionSE3Index.XYZ]
-# origin_quat_se3 = QuaternionSE3.from_array(origin)
-# R_origin = origin_quat_se3.rotation_matrix
-# else:
-# raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
-# assert points_3d_array.shape[-1] == 3
-
-# # Transform points: R_origin @ p_rel + t_origin
-# absolute_points = (R_origin @ points_3d_array.T).T + t_origin
-# return absolute_points
-
-
-# def _quaternion_multiply(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
-# """Multiply two quaternions [w, x, y, z].
-
-# :param q1: First quaternion [w, x, y, z].
-# :param q2: Second quaternion [w, x, y, z].
-# :return: Product quaternion [w, x, y, z].
-# """
-# w1, x1, y1, z1 = q1
-# w2, x2, y2, z2 = q2
-
-# return np.array(
-# [
-# w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
-# w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
-# w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
-# w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
-# ]
-# )
+def translate_se3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+ """Translates a QuaternionSE3 state along the Y-axis.
+
+ :param state_se3: The QuaternionSE3 state to translate.
+ :param distance: The distance to translate along the Y-axis.
+ :return: The translated QuaternionSE3 state.
+ """
+ R = state_se3.rotation_matrix
+ y_axis = R[:, 1]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+ return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_se3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
+ """Translates a QuaternionSE3 state along the X-axis.
+
+ :param state_se3: The QuaternionSE3 state to translate.
+ :param distance: The distance to translate along the X-axis.
+ :return: The translated QuaternionSE3 state.
+ """
+ R = state_se3.rotation_matrix
+ x_axis = R[:, 0]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+ return QuaternionSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_se3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3:
+ """Translates a QuaternionSE3 state along a vector in the body frame.
+
+ :param state_se3: The QuaternionSE3 state to translate.
+ :param vector_3d: The vector to translate along in the body frame.
+ :return: The translated QuaternionSE3 state.
+ """
+ R = state_se3.rotation_matrix
+ world_translation = R @ vector_3d.array
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[QuaternionSE3Index.XYZ] += world_translation
+ return QuaternionSE3.from_array(state_se3_array, copy=False)
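
Taken together, the three helpers above are the same update, p' = p + R @ v with v expressed in the body frame; the axis-specific variants just pick out one column of R. A minimal standalone sketch of that identity with plain numpy (no d123 imports; all names are local to this example):

import numpy as np

def _rot_z(yaw):
    # Rotation about the z-axis only; enough to illustrate the body-frame update.
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

# Pose at the origin, yawed 90 degrees: the body x-axis points along world +y.
R = _rot_z(np.pi / 2)
p = np.zeros(3)
v_body = np.array([1.0, 0.0, 0.0])  # translate 1 m along the body x-axis

p_new = p + R @ v_body              # translate_se3_along_body_frame
p_x = p + 1.0 * R[:, 0]             # translate_se3_along_x picks column 0 of R
assert np.allclose(p_new, [0.0, 1.0, 0.0]) and np.allclose(p_new, p_x)
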
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index abed7552..affb70a3 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -7,6 +7,7 @@
from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
from d123.geometry.rotation import EulerAngles
from d123.geometry.utils.rotation_utils import (
+ get_rotation_matrices_from_euler_array,
get_rotation_matrix_from_euler_array,
normalize_angle,
)
@@ -101,18 +102,16 @@ def convert_absolute_to_relative_se3_array(
assert se3_array.ndim >= 1
assert se3_array.shape[-1] == len(StateSE3Index)
-    # Extract positions and orientations from se3_array
abs_positions = se3_array[..., StateSE3Index.XYZ]
- abs_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+ abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES])
+
+ # Convert absolute rotation matrices to relative rotation matrices
+ rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices)
+ rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices])
# Vectorized relative position calculation
rel_positions = (abs_positions - t_origin) @ R_origin
- # Convert back to Euler angles (this may need a custom function)
- # For now, using simple subtraction as approximation (this is incorrect for general rotations)
- origin_euler = origin_array[StateSE3Index.EULER_ANGLES]
- rel_euler_angles = abs_euler_angles - origin_euler
-
# Prepare output array
rel_se3_array = se3_array.copy()
rel_se3_array[..., StateSE3Index.XYZ] = rel_positions
@@ -149,14 +148,14 @@ def convert_relative_to_absolute_se3_array(
# Extract relative positions and orientations
rel_positions = se3_array[..., StateSE3Index.XYZ]
- rel_euler_angles = se3_array[..., StateSE3Index.EULER_ANGLES]
+ rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES])
# Vectorized absolute position calculation: rotate and translate
- abs_positions = (R_origin @ rel_positions.T).T + t_origin
+ abs_positions = (rel_positions @ R_origin.T) + t_origin
- # Vectorized absolute orientation: add origin's euler angles
- origin_euler = np.array([origin.roll, origin.pitch, origin.yaw], dtype=np.float64)
- abs_euler_angles = rel_euler_angles + origin_euler
+ # Convert relative rotation matrices to absolute rotation matrices
+ abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices)
+ abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices])
# Prepare output array
abs_se3_array = se3_array.copy()
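
The hunks above replace the incorrect Euler-angle subtraction with a proper composition: R_rel = R_originᵀ @ R_abs and p_rel = R_originᵀ @ (p_abs - t_origin), with the exact inverses used for relative-to-absolute. A standalone numpy round-trip of the two formulas, restricted to z-rotations for brevity:

import numpy as np

def rot_z(yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

R_o, t_o = rot_z(0.3), np.array([1.0, 2.0, 0.5])   # origin pose
R_a, t_a = rot_z(1.1), np.array([4.0, -1.0, 0.0])  # absolute pose

# absolute -> relative; (t_a - t_o) @ R_o is the row-vector form of R_o.T @ (t_a - t_o)
R_rel = R_o.T @ R_a
t_rel = (t_a - t_o) @ R_o

# relative -> absolute recovers the inputs exactly
assert np.allclose(R_o @ R_rel, R_a)
assert np.allclose(t_rel @ R_o.T + t_o, t_a)
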
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index da75c654..1ad2396f 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -2,8 +2,9 @@
import numpy as np
import numpy.typing as npt
+import pyquaternion
-from d123.geometry.geometry_index import EulerAnglesIndex
+from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]:
@@ -16,6 +17,12 @@ def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float
def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ """
+ Convert Euler angles to rotation matrices using Tait-Bryan ZYX convention (yaw-pitch-roll).
+
+    Convention: extrinsic rotations about the fixed axes in order Z-Y-X (yaw, then pitch, then roll),
+    equivalent to: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
+ """
assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex)
# Extract roll, pitch, yaw for all samples at once
@@ -32,7 +39,7 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
batch_size = euler_angles_array.shape[0]
rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64)
- # R_x @ R_y @ R_z components
+ # ZYX Tait-Bryan rotation matrix elements
rotation_matrices[:, 0, 0] = cos_pitch * cos_yaw
rotation_matrices[:, 0, 1] = -cos_pitch * sin_yaw
rotation_matrices[:, 0, 2] = sin_pitch
@@ -50,4 +57,98 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex)
- return get_rotation_matrices_from_euler_array(euler_angles[None, :])[0]
+ return get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0]
+
+
+def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert quaternion_array.ndim == 2 and quaternion_array.shape[1] == len(QuaternionIndex)
+ # TODO: Optimize this function to avoid the for loop, possibly by using pyquaternion's internal methods directly.
+ rotation_matrices = np.zeros((quaternion_array.shape[0], 3, 3), dtype=np.float64)
+ for i, quaternion in enumerate(quaternion_array):
+ rotation_matrices[i] = pyquaternion.Quaternion(array=quaternion).rotation_matrix
+ return rotation_matrices
+
+
+def get_rotation_matrix_from_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert quaternion.ndim == 1 and quaternion.shape[0] == len(QuaternionIndex)
+ return get_rotation_matrices_from_quaternion_array(quaternion[None, :])[0]
+
+
+def conjugate_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Computes the conjugate of an array of quaternions in the order [qw, qx, qy, qz].
+
+ :param quaternion: Array of quaternions.
+ :return: Array of conjugated quaternions.
+ """
+ assert quaternion.ndim >= 1
+ assert quaternion.shape[-1] == len(QuaternionIndex)
+ conjugated_quaternions = np.zeros_like(quaternion)
+ conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion[..., QuaternionIndex.SCALAR]
+ conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion[..., QuaternionIndex.VECTOR]
+ return conjugated_quaternions
+
+
+def invert_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Computes the inverse of an array of quaternions in the order [qw, qx, qy, qz].
+
+ :param quaternion: Array of quaternions.
+ :return: Array of inverted quaternions.
+ """
+ assert quaternion.ndim >= 1
+ assert quaternion.shape[-1] == len(QuaternionIndex)
+ norm_squared = np.sum(quaternion**2, axis=-1, keepdims=True)
+ assert np.all(norm_squared > 0), "Cannot invert a quaternion with zero norm."
+ conjugated_quaternions = conjugate_quaternion_array(quaternion)
+ inverted_quaternions = conjugated_quaternions / norm_squared
+ return inverted_quaternions
+
+
+def normalize_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Normalizes an array of quaternions in the order [qw, qx, qy, qz].
+
+ :param quaternion: Array of quaternions.
+ :return: Array of normalized quaternions.
+ """
+ assert quaternion.ndim >= 1
+ assert quaternion.shape[-1] == len(QuaternionIndex)
+ norm = np.linalg.norm(quaternion, axis=-1, keepdims=True)
+ assert np.all(norm > 0), "Cannot normalize a quaternion with zero norm."
+ normalized_quaternions = quaternion / norm
+ return normalized_quaternions
+
+
+def multiply_quaternion_arrays(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Multiplies two arrays of quaternions element-wise in the order [qw, qx, qy, qz].
+
+ :param q1: First array of quaternions.
+ :param q2: Second array of quaternions.
+ :return: Array of resulting quaternions after multiplication.
+ """
+ assert q1.ndim >= 1
+ assert q2.ndim >= 1
+ assert q1.shape[-1] == q2.shape[-1] == len(QuaternionIndex)
+
+ # Vectorized quaternion multiplication
+ qw1, qx1, qy1, qz1 = (
+ q1[..., QuaternionIndex.QW],
+ q1[..., QuaternionIndex.QX],
+ q1[..., QuaternionIndex.QY],
+ q1[..., QuaternionIndex.QZ],
+ )
+ qw2, qx2, qy2, qz2 = (
+ q2[..., QuaternionIndex.QW],
+ q2[..., QuaternionIndex.QX],
+ q2[..., QuaternionIndex.QY],
+ q2[..., QuaternionIndex.QZ],
+ )
+
+ quaternions = np.stack(
+ [
+ qw1 * qw2 - qx1 * qx2 - qy1 * qy2 - qz1 * qz2,
+ qw1 * qx2 + qx1 * qw2 + qy1 * qz2 - qz1 * qy2,
+ qw1 * qy2 - qx1 * qz2 + qy1 * qw2 + qz1 * qx2,
+ qw1 * qz2 + qx1 * qy2 - qy1 * qx2 + qz1 * qw2,
+ ],
+ axis=-1,
+ )
+ return quaternions
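
multiply_quaternion_arrays is the Hamilton product in [qw, qx, qy, qz] order, so composing two rotations about the same axis must sum their angles. A self-contained sanity check (quat_mul and quat_about_z are local helpers written only for this example):

import numpy as np

def quat_mul(q1, q2):
    # Hamilton product, [qw, qx, qy, qz] ordering as in the patch.
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

def quat_about_z(theta):
    # Unit quaternion for a rotation of theta about +z.
    return np.array([np.cos(theta / 2), 0.0, 0.0, np.sin(theta / 2)])

# Rotating by 0.4 rad then 0.3 rad about z equals rotating by 0.7 rad.
assert np.allclose(quat_mul(quat_about_z(0.4), quat_about_z(0.3)), quat_about_z(0.7))
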
From 78592a1bfee311e93d1ab31e613ae7adff5a6b34 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 10:40:10 +0200
Subject: [PATCH 037/145] Expand rotation utils and rely less on pyquaternion
lib (#43)
---
d123/geometry/rotation.py | 20 ++-
d123/geometry/utils/rotation_utils.py | 237 +++++++++++++++++++++++---
2 files changed, 224 insertions(+), 33 deletions(-)
diff --git a/d123/geometry/rotation.py b/d123/geometry/rotation.py
index 3fc4afa9..1ee21e25 100644
--- a/d123/geometry/rotation.py
+++ b/d123/geometry/rotation.py
@@ -8,7 +8,12 @@
from d123.common.utils.mixin import ArrayMixin
from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
-from d123.geometry.utils.rotation_utils import get_rotation_matrix_from_euler_array
+from d123.geometry.utils.rotation_utils import (
+ get_euler_array_from_quaternion_array,
+ get_quaternion_array_from_rotation_matrix,
+ get_rotation_matrix_from_euler_array,
+ get_rotation_matrix_from_quaternion_array,
+)
class EulerAngles(ArrayMixin):
@@ -89,6 +94,10 @@ def array(self) -> npt.NDArray[np.float64]:
"""
return self._array
+ @property
+ def quaternion(self) -> Quaternion:
+ return Quaternion.from_euler_angles(self)
+
@cached_property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the Euler angles.
@@ -110,7 +119,6 @@ def __hash__(self):
class Quaternion(ArrayMixin):
"""
Represents a quaternion for 3D rotations.
- NOTE: This class uses the pyquaternion library for internal computations.
"""
_array: npt.NDArray[np.float64]
@@ -147,8 +155,7 @@ def from_rotation_matrix(cls, rotation_matrix: npt.NDArray[np.float64]) -> Quate
"""
assert rotation_matrix.ndim == 2
assert rotation_matrix.shape == (3, 3)
- quaternion = pyquaternion.Quaternion(matrix=rotation_matrix)
- return Quaternion(qw=quaternion.w, qx=quaternion.x, qy=quaternion.y, qz=quaternion.z)
+ return Quaternion.from_array(get_quaternion_array_from_rotation_matrix(rotation_matrix), copy=False)
@classmethod
def from_euler_angles(cls, euler_angles: EulerAngles) -> Quaternion:
@@ -217,8 +224,7 @@ def euler_angles(self) -> EulerAngles:
:return: An EulerAngles instance representing the Euler angles.
"""
- yaw, pitch, roll = self.pyquaternion.yaw_pitch_roll
- return EulerAngles(roll=roll, pitch=pitch, yaw=yaw)
+ return EulerAngles.from_array(get_euler_array_from_quaternion_array(self.array), copy=False)
@cached_property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
@@ -226,7 +232,7 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
:return: A 3x3 numpy array representing the rotation matrix.
"""
- return self.pyquaternion.rotation_matrix
+ return get_rotation_matrix_from_quaternion_array(self.array)
def __iter__(self):
"""Iterator over quaternion components."""
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index 1ad2396f..871ec29c 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -2,11 +2,27 @@
import numpy as np
import numpy.typing as npt
-import pyquaternion
+
+# import pyquaternion
from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
+def batch_matmul(A: npt.NDArray[np.float64], B: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ """Batch matrix multiplication for arrays of matrices.
+ # TODO: move somewhere else
+
+ :param A: Array of shape (..., M, N)
+ :param B: Array of shape (..., N, P)
+ :return: Array of shape (..., M, P) resulting from batch matrix multiplication of A and B.
+ """
+ assert A.ndim >= 2 and B.ndim >= 2
+ assert (
+ A.shape[-1] == B.shape[-2]
+ ), f"Inner dimensions must match for matrix multiplication, got {A.shape} and {B.shape}"
+ return np.einsum("...ij,...jk->...ik", A, B)
+
+
def normalize_angle(angle: Union[float, npt.NDArray[np.float64]]) -> Union[float, npt.NDArray[np.float64]]:
"""
    Map an angle to the range [-π, π].
@@ -55,65 +71,154 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
return rotation_matrices
+def get_euler_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ raise NotImplementedError
+
+
+def get_quaternion_array_from_rotation_matrices(rotation_matrices: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert rotation_matrices.ndim == 3
+ assert rotation_matrices.shape[-1] == rotation_matrices.shape[-2] == 3
+ # http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
+
+ # TODO: Update with:
+ # https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
+
+ N = rotation_matrices.shape[0]
+ quaternions = np.zeros((N, 4), dtype=np.float64)
+
+ # Extract rotation matrix elements for vectorized operations
+ R = rotation_matrices
+
+ # Compute trace for each matrix
+ trace = np.trace(R, axis1=1, axis2=2)
+
+ # Case 1: trace > 0 (most common case)
+ mask1 = trace > 0
+ s1 = np.sqrt(trace[mask1] + 1.0) * 2 # s = 4 * qw
+ quaternions[mask1, QuaternionIndex.QW] = 0.25 * s1
+ quaternions[mask1, QuaternionIndex.QX] = (R[mask1, 2, 1] - R[mask1, 1, 2]) / s1
+ quaternions[mask1, QuaternionIndex.QY] = (R[mask1, 0, 2] - R[mask1, 2, 0]) / s1
+ quaternions[mask1, QuaternionIndex.QZ] = (R[mask1, 1, 0] - R[mask1, 0, 1]) / s1
+
+ # Case 2: R[0,0] > R[1,1] and R[0,0] > R[2,2]
+ mask2 = (~mask1) & (R[:, 0, 0] > R[:, 1, 1]) & (R[:, 0, 0] > R[:, 2, 2])
+ s2 = np.sqrt(1.0 + R[mask2, 0, 0] - R[mask2, 1, 1] - R[mask2, 2, 2]) * 2 # s = 4 * qx
+ quaternions[mask2, QuaternionIndex.QW] = (R[mask2, 2, 1] - R[mask2, 1, 2]) / s2
+ quaternions[mask2, QuaternionIndex.QX] = 0.25 * s2 # x
+ quaternions[mask2, QuaternionIndex.QY] = (R[mask2, 0, 1] + R[mask2, 1, 0]) / s2
+ quaternions[mask2, QuaternionIndex.QZ] = (R[mask2, 0, 2] + R[mask2, 2, 0]) / s2
+
+ # Case 3: R[1,1] > R[2,2]
+ mask3 = (~mask1) & (~mask2) & (R[:, 1, 1] > R[:, 2, 2])
+ s3 = np.sqrt(1.0 + R[mask3, 1, 1] - R[mask3, 0, 0] - R[mask3, 2, 2]) * 2 # s = 4 * qy
+ quaternions[mask3, QuaternionIndex.QW] = (R[mask3, 0, 2] - R[mask3, 2, 0]) / s3
+ quaternions[mask3, QuaternionIndex.QX] = (R[mask3, 0, 1] + R[mask3, 1, 0]) / s3
+ quaternions[mask3, QuaternionIndex.QY] = 0.25 * s3 # y
+ quaternions[mask3, QuaternionIndex.QZ] = (R[mask3, 1, 2] + R[mask3, 2, 1]) / s3
+
+ # Case 4: R[2,2] is largest
+ mask4 = (~mask1) & (~mask2) & (~mask3)
+ s4 = np.sqrt(1.0 + R[mask4, 2, 2] - R[mask4, 0, 0] - R[mask4, 1, 1]) * 2 # s = 4 * qz
+ quaternions[mask4, QuaternionIndex.QW] = (R[mask4, 1, 0] - R[mask4, 0, 1]) / s4
+ quaternions[mask4, QuaternionIndex.QX] = (R[mask4, 0, 2] + R[mask4, 2, 0]) / s4
+ quaternions[mask4, QuaternionIndex.QY] = (R[mask4, 1, 2] + R[mask4, 2, 1]) / s4
+ quaternions[mask4, QuaternionIndex.QZ] = 0.25 * s4 # z
+
+ assert np.all(mask1 | mask2 | mask3 | mask4), "All matrices should fall into one of the four cases."
+
+ return normalize_quaternion_array(quaternions)
+
+
+def get_quaternion_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert rotation_matrix.ndim == 2 and rotation_matrix.shape == (3, 3)
+ return get_quaternion_array_from_rotation_matrices(rotation_matrix[None, ...])[0]
+
+
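
The matrix-to-quaternion conversion above is the classic trace-based branching (Shepperd-style); for well-conditioned rotations the trace > 0 branch applies. A standalone sketch of just that branch, round-tripped against the closed-form quaternion-to-matrix formula (both helpers are local to this example):

import numpy as np

def quat_from_rot_trace_branch(R):
    # trace > 0 branch of the conversion above; s = 4 * qw.
    s = np.sqrt(np.trace(R) + 1.0) * 2.0
    return np.array([
        0.25 * s,
        (R[2, 1] - R[1, 2]) / s,
        (R[0, 2] - R[2, 0]) / s,
        (R[1, 0] - R[0, 1]) / s,
    ])

def rot_from_quat(q):
    # Closed-form unit-quaternion -> rotation matrix, for comparison.
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

q_in = np.array([0.9, 0.1, 0.2, 0.3])
q_in /= np.linalg.norm(q_in)
R = rot_from_quat(q_in)
assert np.trace(R) > 0  # this example stays inside the trace > 0 branch
assert np.allclose(quat_from_rot_trace_branch(R), q_in)
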
def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex)
return get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0]
def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- assert quaternion_array.ndim == 2 and quaternion_array.shape[1] == len(QuaternionIndex)
- # TODO: Optimize this function to avoid the for loop, possibly by using pyquaternion's internal methods directly.
- rotation_matrices = np.zeros((quaternion_array.shape[0], 3, 3), dtype=np.float64)
- for i, quaternion in enumerate(quaternion_array):
- rotation_matrices[i] = pyquaternion.Quaternion(array=quaternion).rotation_matrix
- return rotation_matrices
+ assert quaternion_array.ndim == 2 and quaternion_array.shape[-1] == len(QuaternionIndex)
+ norm_quaternion = normalize_quaternion_array(quaternion_array)
+ Q_matrices = get_q_matrices(norm_quaternion)
+ Q_bar_matrices = get_q_bar_matrices(norm_quaternion)
+ rotation_matrix = batch_matmul(Q_matrices, Q_bar_matrices.conj().swapaxes(-1, -2))
+ return rotation_matrix[:, 1:][:, :, 1:]
+
+
+def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert quaternion_array.ndim == 1 and quaternion_array.shape[0] == len(QuaternionIndex)
+ return get_rotation_matrices_from_quaternion_array(quaternion_array[None, :])[0]
+
+
+def get_euler_array_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert quaternion_array.ndim >= 1 and quaternion_array.shape[-1] == len(QuaternionIndex)
+ norm_quaternion = normalize_quaternion_array(quaternion_array)
+ QW, QX, QY, QZ = (
+ norm_quaternion[..., QuaternionIndex.QW],
+ norm_quaternion[..., QuaternionIndex.QX],
+ norm_quaternion[..., QuaternionIndex.QY],
+ norm_quaternion[..., QuaternionIndex.QZ],
+ )
+ euler_angles = np.zeros_like(quaternion_array[..., :3])
+ euler_angles[..., EulerAnglesIndex.YAW] = np.arctan2(
+ 2 * (QW * QZ - QX * QY),
+ 1 - 2 * (QY**2 + QZ**2),
+ )
+ euler_angles[..., EulerAnglesIndex.PITCH] = np.arcsin(
+ np.clip(2 * (QW * QY + QZ * QX), -1.0, 1.0),
+ )
+ euler_angles[..., EulerAnglesIndex.ROLL] = np.arctan2(
+ 2 * (QW * QX - QY * QZ),
+ 1 - 2 * (QX**2 + QY**2),
+ )
-def get_rotation_matrix_from_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- assert quaternion.ndim == 1 and quaternion.shape[0] == len(QuaternionIndex)
- return get_rotation_matrices_from_quaternion_array(quaternion[None, :])[0]
+ return euler_angles
-def conjugate_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+def conjugate_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""Computes the conjugate of an array of quaternions.
in the order [qw, qx, qy, qz].
:param quaternion: Array of quaternions.
:return: Array of conjugated quaternions.
"""
- assert quaternion.ndim >= 1
- assert quaternion.shape[-1] == len(QuaternionIndex)
- conjugated_quaternions = np.zeros_like(quaternion)
- conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion[..., QuaternionIndex.SCALAR]
- conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion[..., QuaternionIndex.VECTOR]
+ assert quaternion_array.ndim >= 1
+ assert quaternion_array.shape[-1] == len(QuaternionIndex)
+ conjugated_quaternions = np.zeros_like(quaternion_array)
+ conjugated_quaternions[..., QuaternionIndex.SCALAR] = quaternion_array[..., QuaternionIndex.SCALAR]
+ conjugated_quaternions[..., QuaternionIndex.VECTOR] = -quaternion_array[..., QuaternionIndex.VECTOR]
return conjugated_quaternions
-def invert_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+def invert_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""Computes the inverse of an array of quaternions.
in the order [qw, qx, qy, qz].
:param quaternion: Array of quaternions.
:return: Array of inverted quaternions.
"""
- assert quaternion.ndim >= 1
- assert quaternion.shape[-1] == len(QuaternionIndex)
- norm_squared = np.sum(quaternion**2, axis=-1, keepdims=True)
+ assert quaternion_array.ndim >= 1
+ assert quaternion_array.shape[-1] == len(QuaternionIndex)
+ norm_squared = np.sum(quaternion_array**2, axis=-1, keepdims=True)
assert np.all(norm_squared > 0), "Cannot invert a quaternion with zero norm."
- conjugated_quaternions = conjugate_quaternion_array(quaternion)
+ conjugated_quaternions = conjugate_quaternion_array(quaternion_array)
inverted_quaternions = conjugated_quaternions / norm_squared
return inverted_quaternions
-def normalize_quaternion_array(quaternion: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+def normalize_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""Normalizes an array of quaternions.
in the order [qw, qx, qy, qz].
:param quaternion: Array of quaternions.
:return: Array of normalized quaternions.
"""
- assert quaternion.ndim >= 1
- assert quaternion.shape[-1] == len(QuaternionIndex)
- norm = np.linalg.norm(quaternion, axis=-1, keepdims=True)
+ assert quaternion_array.ndim >= 1
+ assert quaternion_array.shape[-1] == len(QuaternionIndex)
+ norm = np.linalg.norm(quaternion_array, axis=-1, keepdims=True)
assert np.all(norm > 0), "Cannot normalize a quaternion with zero norm."
- normalized_quaternions = quaternion / norm
+ normalized_quaternions = quaternion_array / norm
return normalized_quaternions
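
conjugate_quaternion_array and invert_quaternion_array together encode q⁻¹ = q* / |q|², so q ⊗ q⁻¹ is the identity quaternion even for non-unit inputs. A self-contained check with a local Hamilton product:

import numpy as np

def quat_mul(q1, q2):
    # Hamilton product, [qw, qx, qy, qz].
    w1, x1, y1, z1 = q1
    w2, x2, y2, z2 = q2
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

q = np.array([2.0, -1.0, 0.5, 3.0])                           # deliberately non-unit
q_inv = np.array([q[0], -q[1], -q[2], -q[3]]) / np.dot(q, q)  # conjugate / |q|^2
assert np.allclose(quat_mul(q, q_inv), [1.0, 0.0, 0.0, 0.0])
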
@@ -152,3 +257,83 @@ def multiply_quaternion_arrays(q1: npt.NDArray[np.float64], q2: npt.NDArray[np.f
axis=-1,
)
return quaternions
+
+
+def get_q_matrices(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Computes the Q (left-multiplication) matrices for an array of quaternions in the order [qw, qx, qy, qz].
+
+    :param quaternion_array: Array of quaternions.
+ :return: Array of Q matrices.
+ """
+ assert quaternion_array.ndim >= 1
+ assert quaternion_array.shape[-1] == len(QuaternionIndex)
+
+ qw = quaternion_array[..., QuaternionIndex.QW]
+ qx = quaternion_array[..., QuaternionIndex.QX]
+ qy = quaternion_array[..., QuaternionIndex.QY]
+ qz = quaternion_array[..., QuaternionIndex.QZ]
+
+ batch_shape = quaternion_array.shape[:-1]
+ Q_matrices = np.zeros(batch_shape + (4, 4), dtype=np.float64)
+
+ Q_matrices[..., 0, 0] = qw
+ Q_matrices[..., 0, 1] = -qx
+ Q_matrices[..., 0, 2] = -qy
+ Q_matrices[..., 0, 3] = -qz
+
+ Q_matrices[..., 1, 0] = qx
+ Q_matrices[..., 1, 1] = qw
+ Q_matrices[..., 1, 2] = -qz
+ Q_matrices[..., 1, 3] = qy
+
+ Q_matrices[..., 2, 0] = qy
+ Q_matrices[..., 2, 1] = qz
+ Q_matrices[..., 2, 2] = qw
+ Q_matrices[..., 2, 3] = -qx
+
+ Q_matrices[..., 3, 0] = qz
+ Q_matrices[..., 3, 1] = -qy
+ Q_matrices[..., 3, 2] = qx
+ Q_matrices[..., 3, 3] = qw
+
+ return Q_matrices
+
+
+def get_q_bar_matrices(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+    """Computes the Q-bar (right-multiplication) matrices for an array of quaternions in the order [qw, qx, qy, qz].
+
+    :param quaternion_array: Array of quaternions.
+ :return: Array of Q-bar matrices.
+ """
+ assert quaternion_array.ndim >= 1
+ assert quaternion_array.shape[-1] == len(QuaternionIndex)
+
+ qw = quaternion_array[..., QuaternionIndex.QW]
+ qx = quaternion_array[..., QuaternionIndex.QX]
+ qy = quaternion_array[..., QuaternionIndex.QY]
+ qz = quaternion_array[..., QuaternionIndex.QZ]
+
+ batch_shape = quaternion_array.shape[:-1]
+ Q_bar_matrices = np.zeros(batch_shape + (4, 4), dtype=np.float64)
+
+ Q_bar_matrices[..., 0, 0] = qw
+ Q_bar_matrices[..., 0, 1] = -qx
+ Q_bar_matrices[..., 0, 2] = -qy
+ Q_bar_matrices[..., 0, 3] = -qz
+
+ Q_bar_matrices[..., 1, 0] = qx
+ Q_bar_matrices[..., 1, 1] = qw
+ Q_bar_matrices[..., 1, 2] = qz
+ Q_bar_matrices[..., 1, 3] = -qy
+
+ Q_bar_matrices[..., 2, 0] = qy
+ Q_bar_matrices[..., 2, 1] = -qz
+ Q_bar_matrices[..., 2, 2] = qw
+ Q_bar_matrices[..., 2, 3] = qx
+
+ Q_bar_matrices[..., 3, 0] = qz
+ Q_bar_matrices[..., 3, 1] = qy
+ Q_bar_matrices[..., 3, 2] = -qx
+ Q_bar_matrices[..., 3, 3] = qw
+
+ return Q_bar_matrices
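
get_rotation_matrices_from_quaternion_array relies on the identity Q(q) @ Q̄(q)ᵀ = diag(1, R(q)) for unit q, which is exactly why it slices off the first row and column. A standalone numpy verification of that identity (rot_direct is the usual closed-form conversion, defined here only for comparison):

import numpy as np

def q_left(q):
    # Left-multiplication matrix, as built by get_q_matrices.
    w, x, y, z = q
    return np.array([[w, -x, -y, -z], [x, w, -z, y], [y, z, w, -x], [z, -y, x, w]])

def q_right(q):
    # Right-multiplication matrix, as built by get_q_bar_matrices.
    w, x, y, z = q
    return np.array([[w, -x, -y, -z], [x, w, z, -y], [y, -z, w, x], [z, y, -x, w]])

def rot_direct(q):
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

q = np.array([0.7, -0.2, 0.5, 0.4])
q /= np.linalg.norm(q)
M = q_left(q) @ q_right(q).T
assert np.allclose(M[0, 0], 1.0) and np.allclose(M[0, 1:], 0.0)  # top row is (1, 0, 0, 0)
assert np.allclose(M[1:, 1:], rot_direct(q))                     # lower-right block is R(q)
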
From af72fbf11c7c59c670725b63fd463ad0c6d6aebc Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 11:42:33 +0200
Subject: [PATCH 038/145] Rename SE3 types to transition `geometry` to a
 quaternion representation (#43), add tests (#44)
---
d123/common/datatypes/detection/detection.py | 6 +-
.../datatypes/vehicle_state/ego_state.py | 16 +-
.../vehicle_state/vehicle_parameters.py | 12 +-
.../common/visualization/matplotlib/camera.py | 4 +-
d123/common/visualization/matplotlib/utils.py | 4 +-
d123/common/visualization/viser/utils.py | 6 +-
d123/common/visualization/viser/utils_v2.py | 4 +-
.../av2/av2_data_converter.py | 8 +-
.../nuplan/nuplan_data_converter.py | 6 +-
.../wopd/wopd_data_converter.py | 12 +-
d123/geometry/__init__.py | 4 +-
d123/geometry/bounding_box.py | 10 +-
d123/geometry/geometry_index.py | 4 +-
d123/geometry/se.py | 326 +++---
d123/geometry/test/test_bounding_box.py | 6 +-
d123/geometry/test/test_transform.py | 976 ------------------
d123/geometry/transform/__init__.py | 15 -
d123/geometry/transform/test/__init__.py | 0
.../test/test_transform_consistency.py | 475 +++++++++
.../test/test_transform_euler_se3.py | 335 ++++++
.../transform/test/test_transform_se2.py | 220 ++++
.../test/test_transform_se3.py} | 129 ++-
.../geometry/transform/transform_euler_se3.py | 169 +++
.../transform/transform_quaternion_se3.py | 213 ----
d123/geometry/transform/transform_se3.py | 240 +++--
25 files changed, 1609 insertions(+), 1591 deletions(-)
delete mode 100644 d123/geometry/test/test_transform.py
create mode 100644 d123/geometry/transform/test/__init__.py
create mode 100644 d123/geometry/transform/test/test_transform_consistency.py
create mode 100644 d123/geometry/transform/test/test_transform_euler_se3.py
create mode 100644 d123/geometry/transform/test/test_transform_se2.py
rename d123/geometry/{test/test_transform_quaternion.py => transform/test/test_transform_se3.py} (51%)
create mode 100644 d123/geometry/transform/transform_euler_se3.py
delete mode 100644 d123/geometry/transform/transform_quaternion_se3.py
diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py
index 075129b4..ff18a561 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/common/datatypes/detection/detection.py
@@ -7,7 +7,7 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.utils.enums import SerialIntEnum
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, EulerStateSE3, Vector2D, Vector3D
@dataclass
@@ -51,11 +51,11 @@ def shapely_polygon(self) -> shapely.geometry.Polygon:
return self.bounding_box_se3.shapely_polygon
@property
- def center(self) -> StateSE3:
+ def center(self) -> EulerStateSE3:
return self.bounding_box_se3.center
@property
- def center_se3(self) -> StateSE3:
+ def center_se3(self) -> EulerStateSE3:
return self.bounding_box_se3.center_se3
@property
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py
index d0487e60..1d378fcd 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/common/datatypes/vehicle_state/ego_state.py
@@ -23,7 +23,7 @@
rear_axle_se3_to_center_se3,
)
from d123.common.utils.enums import classproperty
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, EulerStateSE3, Vector2D, Vector3D
# TODO: Find an appropriate way to handle SE2 and SE3 states.
@@ -59,7 +59,7 @@ def DYNAMIC_VEHICLE_STATE(cls) -> slice:
@dataclass
class EgoStateSE3:
- center_se3: StateSE3
+ center_se3: EulerStateSE3
dynamic_state_se3: DynamicStateSE3
vehicle_parameters: VehicleParameters
timepoint: Optional[TimePoint] = None
@@ -72,14 +72,14 @@ def from_array(
vehicle_parameters: VehicleParameters,
timepoint: Optional[TimePoint] = None,
) -> EgoStateSE3:
- state_se3 = StateSE3.from_array(array[EgoStateSE3Index.SE3])
+ state_se3 = EulerStateSE3.from_array(array[EgoStateSE3Index.SE3])
dynamic_state = DynamicStateSE3.from_array(array[EgoStateSE3Index.DYNAMIC_VEHICLE_STATE])
return EgoStateSE3(state_se3, dynamic_state, vehicle_parameters, timepoint)
@classmethod
def from_rear_axle(
cls,
- rear_axle_se3: StateSE3,
+ rear_axle_se3: EulerStateSE3,
dynamic_state_se3: DynamicStateSE3,
vehicle_parameters: VehicleParameters,
time_point: TimePoint,
@@ -100,7 +100,7 @@ def array(self) -> npt.NDArray[np.float64]:
Convert the EgoVehicleState to an array.
:return: An array containing the bounding box and dynamic state information.
"""
- assert isinstance(self.center_se3, StateSE3)
+ assert isinstance(self.center_se3, EulerStateSE3)
assert isinstance(self.dynamic_state_se3, DynamicStateSE3)
center_array = self.center_se3.array
@@ -109,11 +109,11 @@ def array(self) -> npt.NDArray[np.float64]:
return np.concatenate((center_array, dynamic_array), axis=0)
@property
- def center(self) -> StateSE3:
+ def center(self) -> EulerStateSE3:
return self.center_se3
@property
- def rear_axle_se3(self) -> StateSE3:
+ def rear_axle_se3(self) -> EulerStateSE3:
return center_se3_to_rear_axle_se3(center_se3=self.center_se3, vehicle_parameters=self.vehicle_parameters)
@property
@@ -121,7 +121,7 @@ def rear_axle_se2(self) -> StateSE2:
return self.rear_axle_se3.state_se2
@property
- def rear_axle(self) -> StateSE3:
+ def rear_axle(self) -> EulerStateSE3:
return self.rear_axle_se3
@cached_property
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 152b9382..4698206b 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
from dataclasses import dataclass
-from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
+from d123.geometry import StateSE2, EulerStateSE3, Vector2D, Vector3D
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
-from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
# TODO: Add more vehicle parameters, potentially extend the parameters.
@@ -76,14 +76,14 @@ def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters:
)
-def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
+def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
"""
Converts a center state to a rear axle state.
:param center_se3: The center state.
:param vehicle_parameters: The vehicle parameters.
:return: The rear axle state.
"""
- return translate_se3_along_body_frame(
+ return translate_euler_se3_along_body_frame(
center_se3,
Vector3D(
-vehicle_parameters.rear_axle_to_center_longitudinal,
@@ -93,14 +93,14 @@ def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: Vehicl
)
-def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
+def rear_axle_se3_to_center_se3(rear_axle_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
"""
Converts a rear axle state to a center state.
:param rear_axle_se3: The rear axle state.
:param vehicle_parameters: The vehicle parameters.
:return: The center state.
"""
- return translate_se3_along_body_frame(
+ return translate_euler_se3_along_body_frame(
rear_axle_se3,
Vector3D(
vehicle_parameters.rear_axle_to_center_longitudinal,
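
The two conversions above are exact inverses: center and rear axle differ by a fixed body-frame offset, translated with opposite signs. A minimal numpy sketch of the round trip; only the longitudinal component is used, since the remaining Vector3D arguments are truncated in the hunk, and the offset value below is purely illustrative:

import numpy as np

def rot_z(yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

def translate_along_body_frame(p, R, v_body):
    # Mirrors translate_euler_se3_along_body_frame for the position part.
    return p + R @ v_body

rear_axle_to_center_longitudinal = 1.461  # illustrative value
center = np.array([10.0, 5.0, 0.6])
R = rot_z(np.pi / 4)

offset = np.array([-rear_axle_to_center_longitudinal, 0.0, 0.0])
rear_axle = translate_along_body_frame(center, R, offset)
# Translating back by the opposite offset recovers the center exactly.
assert np.allclose(translate_along_body_frame(rear_axle, R, -offset), center)
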
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 071a5284..6bdee5f6 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -16,7 +16,7 @@
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
-from d123.geometry.transform.transform_se3 import convert_absolute_to_relative_se3_array
+from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
# from navsim.common.dataclasses import Annotations, Camera, Lidar
# from navsim.common.enums import BoundingBoxIndex, LidarIndex
@@ -98,7 +98,7 @@ def add_box_detections_to_camera_ax(
box_detection_array[idx] = box_detection.bounding_box_se3.array
# FIXME
- box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_se3_array(
+ box_detection_array[..., BoundingBoxSE3Index.STATE_SE3] = convert_absolute_to_relative_euler_se3_array(
ego_state_se3.rear_axle_se3, box_detection_array[..., BoundingBoxSE3Index.STATE_SE3]
)
# box_detection_array[..., BoundingBoxSE3Index.XYZ] -= ego_state_se3.rear_axle_se3.point_3d.array
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 9e030b80..34cc2819 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -9,7 +9,7 @@
from matplotlib.path import Path
from d123.common.visualization.color.config import PlotConfig
-from d123.geometry import StateSE2, StateSE3
+from d123.geometry import StateSE2, EulerStateSE3
def add_shapely_polygon_to_ax(
@@ -114,7 +114,7 @@ def get_pose_triangle(size: float) -> geom.Polygon:
def shapely_geometry_local_coords(
- geometry: geom.base.BaseGeometry, origin: Union[StateSE2, StateSE3]
+ geometry: geom.base.BaseGeometry, origin: Union[StateSE2, EulerStateSE3]
) -> geom.base.BaseGeometry:
"""Helper for transforming shapely geometry in coord-frame"""
# TODO: move somewhere else for general use
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 16717654..18af79d3 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -15,8 +15,8 @@
from d123.dataset.maps.abstract_map import MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_points_3d_array
+from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, EulerStateSE3
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
@@ -229,7 +229,7 @@ def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> T
rear_axle_array = rear_axle.array
rear_axle_array[:3] -= initial_point_3d.array
- rear_axle = StateSE3.from_array(rear_axle_array)
+ rear_axle = EulerStateSE3.from_array(rear_axle_array)
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 6a747698..54c4eaab 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -7,7 +7,7 @@
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
-from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
@@ -31,7 +31,7 @@ def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.fl
bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64)
for idx, vec in corner_extent_factors.items():
vector_3d = Vector3D.from_array(bounding_box_extent * vec.array)
- corners[idx] = translate_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
+ corners[idx] = translate_euler_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
return corners
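
Each corner above is the box center translated along the body frame by the extent scaled with per-axis factors; corner_extent_factors itself is not shown in this hunk, so the ±0.5 convention below is an assumption. A standalone sketch that checks a pose-independent invariant (the largest pairwise corner distance equals the box diagonal):

import numpy as np
from itertools import product

def rot_z(yaw):
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

center = np.array([2.0, -1.0, 0.5])
R = rot_z(0.6)
extent = np.array([5.0, 2.0, 1.5])  # (length, width, height)

# Assumed convention: per-axis factors of +/-0.5, scaled by the box extent.
corners = np.array([center + R @ (extent * np.array(f)) for f in product([0.5, -0.5], repeat=3)])
assert corners.shape == (8, 3)

# Pairwise max distance is the box diagonal, independent of pose.
dists = np.linalg.norm(corners[:, None, :] - corners[None, :, :], axis=-1)
assert np.isclose(dists.max(), np.linalg.norm(extent))
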
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index f5e5e44a..d32e18e5 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -34,8 +34,8 @@
from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array, get_rotation_matrix
+from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array, get_rotation_matrix
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
@@ -399,7 +399,7 @@ def _extract_box_detections(
av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"])
detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type]))
- detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+ detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
origin=ego_state_se3.rear_axle_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
)
@@ -428,7 +428,7 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns:
yaw, pitch, roll = ego_pose_quat.yaw_pitch_roll
- rear_axle_pose = StateSE3(
+ rear_axle_pose = EulerStateSE3(
x=ego_pose_dict["tx_m"],
y=ego_pose_dict["ty_m"],
z=ego_pose_dict["tz_m"],
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index b7b52e0b..a87d0c31 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -31,7 +31,7 @@
from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
check_dependencies(["nuplan", "sqlalchemy"], "nuplan")
@@ -366,7 +366,7 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List
for lidar_box in lidar_pc.lidar_boxes:
lidar_box: LidarBox
- center = StateSE3(
+ center = EulerStateSE3(
x=lidar_box.x,
y=lidar_box.y,
z=lidar_box.z,
@@ -390,7 +390,7 @@ def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
# vehicle_parameters = get_pacifica_parameters()
- rear_axle_pose = StateSE3(
+ rear_axle_pose = EulerStateSE3(
x=lidar_pc.ego_pose.x,
y=lidar_pc.ego_pose.y,
z=lidar_pc.ego_pose.z,
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 3e577a04..1fd26b6b 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -26,8 +26,8 @@
from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3Index, Point3D, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
+from d123.geometry import BoundingBoxSE3Index, Point3D, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
@@ -389,12 +389,12 @@ def _write_recording_table(
write_arrow_table(recording_table, log_file_path)
-def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3:
+def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3:
ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4)
yaw, pitch, roll = Quaternion(matrix=ego_pose_matrix[:3, :3]).yaw_pitch_roll
ego_point_3d = Point3D.from_array(ego_pose_matrix[:3, 3])
- return StateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw)
+ return EulerStateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw)
def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:
@@ -434,7 +434,7 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
detections_token.append(str(detection.id))
detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type]))
- detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+ detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
)
if DETECTION_ROLL_PITCH == "ego":
@@ -490,7 +490,7 @@ def _extract_camera(
transform = np.array(calibration.extrinsic.transform).reshape(4, 4)
# FIXME: This is an ugly hack to convert to uniform camera convention.
- flip_camera = StateSE3(
+ flip_camera = EulerStateSE3(
x=0.0,
y=0.0,
z=0.0,
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index ca210677..ec44efd5 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -7,12 +7,12 @@
Point2DIndex,
Point3DIndex,
StateSE2Index,
- StateSE3Index,
+ EulerStateSE3Index,
Vector2DIndex,
Vector3DIndex,
)
from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.point import Point2D, Point3D
from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
-from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.se import StateSE2, EulerStateSE3
from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index 459e56bf..6ace64e7 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -11,7 +11,7 @@
from d123.common.utils.mixin import ArrayMixin
from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
from d123.geometry.point import Point2D, Point3D
-from d123.geometry.se import StateSE2, StateSE3
+from d123.geometry.se import StateSE2, EulerStateSE3
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
@@ -152,7 +152,7 @@ class BoundingBoxSE3(ArrayMixin):
_array: npt.NDArray[np.float64]
- def __init__(self, center: StateSE3, length: float, width: float, height: float):
+ def __init__(self, center: EulerStateSE3, length: float, width: float, height: float):
"""Initialize BoundingBoxSE3 with center (StateSE3), length, width and height.
:param center: Center of the bounding box as a StateSE3 instance.
@@ -183,15 +183,15 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi
return instance
@property
- def center(self) -> StateSE3:
+ def center(self) -> EulerStateSE3:
"""The center of the bounding box as a StateSE3 instance.
:return: The center of the bounding box as a StateSE3 instance.
"""
- return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
+ return EulerStateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
@property
- def center_se3(self) -> StateSE3:
+ def center_se3(self) -> EulerStateSE3:
"""The center of the bounding box as a StateSE3 instance.
:return: The center of the bounding box as a StateSE3 instance.
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index 93ff4f3a..aa0924db 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -104,7 +104,7 @@ def VECTOR(cls) -> slice:
return slice(cls.QX, cls.QZ + 1)
-class StateSE3Index(IntEnum):
+class EulerStateSE3Index(IntEnum):
"""
Indexes array-like representations of SE3 states (x,y,z,roll,pitch,yaw).
TODO: Use quaternions for rotation.
@@ -130,7 +130,7 @@ def EULER_ANGLES(cls) -> slice:
return slice(cls.ROLL, cls.YAW + 1)
-class QuaternionSE3Index(IntEnum):
+class StateSE3Index(IntEnum):
"""
Indexes array-like representations of SE3 states with quaternions (x,y,z,qw,qx,qy,qz).
"""
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 2bc56da9..148868ea 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -8,7 +8,7 @@
from functools import cached_property
from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, StateSE2Index, StateSE3Index
+from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, StateSE2Index, EulerStateSE3Index
from d123.geometry.point import Point2D, Point3D
from d123.geometry.rotation import EulerAngles, Quaternion
@@ -106,32 +106,32 @@ def shapely_point(self) -> geom.Point:
class StateSE3(ArrayMixin):
- """
- Class to represents a 3D pose as SE3 (x, y, z, roll, pitch, yaw).
- TODO: Use quaternions for rotation representation.
+    """Class representing an SE3 pose (x, y, z, qw, qx, qy, qz) with a quaternion orientation.
+
+    Formerly QuaternionSE3.
"""
_array: npt.NDArray[np.float64]
- def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float):
- """Initialize StateSE3 with x, y, z, roll, pitch, yaw coordinates."""
+ def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float):
+        """Initialize StateSE3 with x, y, z, qw, qx, qy, qz coordinates."""
array = np.zeros(len(StateSE3Index), dtype=np.float64)
array[StateSE3Index.X] = x
array[StateSE3Index.Y] = y
array[StateSE3Index.Z] = z
- array[StateSE3Index.ROLL] = roll
- array[StateSE3Index.PITCH] = pitch
- array[StateSE3Index.YAW] = yaw
+ array[StateSE3Index.QW] = qw
+ array[StateSE3Index.QX] = qx
+ array[StateSE3Index.QY] = qy
+ array[StateSE3Index.QZ] = qz
object.__setattr__(self, "_array", array)
@classmethod
def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateSE3:
- """Constructs a StateSE3 from a numpy array.
+        """Constructs a StateSE3 from a numpy array.
- :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \
- :class:`~d123.geometry.geometry_index.StateSE3Index`.
+        :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.StateSE3Index`.
:param copy: Whether to copy the input array. Defaults to True.
- :return: A StateSE3 instance.
+        :return: A StateSE3 instance.
"""
assert array.ndim == 1
assert array.shape[0] == len(StateSE3Index)
@@ -139,31 +139,9 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateS
object.__setattr__(instance, "_array", array.copy() if copy else array)
return instance
- @classmethod
- def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> StateSE3:
- """Constructs a StateSE3 from a 4x4 transformation matrix.
-
- :param array: A 4x4 numpy array representing the transformation matrix.
- :return: A StateSE3 instance.
- """
- assert array.ndim == 2
- assert array.shape == (4, 4)
- translation = array[:3, 3]
- rotation = array[:3, :3]
- roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation)
-
- return StateSE3(
- x=translation[Point3DIndex.X],
- y=translation[Point3DIndex.Y],
- z=translation[Point3DIndex.Z],
- roll=roll,
- pitch=pitch,
- yaw=yaw,
- )
-
@property
def x(self) -> float:
- """Returns the x-coordinate of the 3D state.
+        """Returns the x-coordinate of the SE3 state.
:return: The x-coordinate.
"""
@@ -171,7 +149,7 @@ def x(self) -> float:
@property
def y(self) -> float:
- """Returns the y-coordinate of the 3D state.
+        """Returns the y-coordinate of the SE3 state.
:return: The y-coordinate.
"""
@@ -179,52 +157,61 @@ def y(self) -> float:
@property
def z(self) -> float:
- """Returns the z-coordinate of the 3D state.
+        """Returns the z-coordinate of the SE3 state.
:return: The z-coordinate.
"""
return self._array[StateSE3Index.Z]
@property
- def roll(self) -> float:
- """Returns the roll (x-axis rotation) of the 3D state.
+ def qw(self) -> float:
+ """Returns the w-coordinate of the quaternion.
- :return: The roll angle.
+ :return: The w-coordinate.
"""
- return self._array[StateSE3Index.ROLL]
+ return self._array[StateSE3Index.QW]
@property
- def pitch(self) -> float:
- """Returns the pitch (y-axis rotation) of the 3D state.
+ def qx(self) -> float:
+ """Returns the x-coordinate of the quaternion.
- :return: The pitch angle.
+ :return: The x-coordinate.
"""
- return self._array[StateSE3Index.PITCH]
+ return self._array[StateSE3Index.QX]
@property
- def yaw(self) -> float:
- """Returns the yaw (z-axis rotation) of the 3D state.
+ def qy(self) -> float:
+ """Returns the y-coordinate of the quaternion.
- :return: The yaw angle.
+ :return: The y-coordinate.
"""
- return self._array[StateSE3Index.YAW]
+ return self._array[StateSE3Index.QY]
+
+ @property
+ def qz(self) -> float:
+ """Returns the z-coordinate of the quaternion.
+
+ :return: The z-coordinate.
+ """
+ return self._array[StateSE3Index.QZ]
@property
def array(self) -> npt.NDArray[np.float64]:
- """Returns the StateSE3 instance as a numpy array.
+        """Converts the StateSE3 instance to a numpy array.
- :return: A numpy array of shape (6,), indexed by \
- :class:`~d123.geometry.geometry_index.StateSE3Index`.
+        :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.StateSE3Index`.
"""
return self._array
@property
def state_se2(self) -> StateSE2:
- """Returns the 3D state as a 2D state by ignoring the z-axis.
+ """Returns the quaternion state as a 2D state by ignoring the z-axis.
:return: A StateSE2 instance representing the 2D projection of the 3D state.
"""
- return StateSE2(self.x, self.y, self.yaw)
+ # Convert quaternion to yaw angle
+ yaw = self.quaternion.euler_angles.yaw
+ return StateSE2(self.x, self.y, yaw)
@property
def point_3d(self) -> Point3D:
@@ -250,174 +237,143 @@ def shapely_point(self) -> geom.Point:
"""
return self.point_3d.shapely_point
- @property
- def rotation_matrix(self) -> npt.NDArray[np.float64]:
- """Returns the 3x3 rotation matrix representation of the state's orientation.
-
- :return: A 3x3 numpy array representing the rotation matrix.
- """
- return self.euler_angles.rotation_matrix
-
- @property
- def transformation_matrix(self) -> npt.NDArray[np.float64]:
- """Returns the 4x4 transformation matrix representation of the state.
-
- :return: A 4x4 numpy array representing the transformation matrix.
- """
- rotation_matrix = self.rotation_matrix
- transformation_matrix = np.eye(4, dtype=np.float64)
- transformation_matrix[:3, :3] = rotation_matrix
- transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]
- return transformation_matrix
-
@cached_property
- def euler_angles(self) -> EulerAngles:
- return EulerAngles.from_array(self.array[StateSE3Index.EULER_ANGLES])
-
- @property
- def quaternion_se3(self) -> QuaternionSE3:
- """Returns the QuaternionSE3 representation of the state.
-
- :return: A QuaternionSE3 instance representing the quaternion.
- """
- quaternion_se3_array = np.zeros(len(QuaternionSE3Index), dtype=np.float64)
- quaternion_se3_array[QuaternionSE3Index.XYZ] = self.array[StateSE3Index.XYZ]
- quaternion_se3_array[QuaternionSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles)
-
- return QuaternionSE3.from_array(quaternion_se3_array)
-
- @property
- def quaternion(self) -> npt.NDArray[np.float64]:
+ def quaternion(self) -> Quaternion:
"""Returns the quaternion (w, x, y, z) representation of the state's orientation.
- :return: A numpy array of shape (4,) representing the quaternion.
+ :return: A Quaternion instance representing the quaternion.
"""
- raise NotImplementedError("Quaternion conversion not implemented yet.")
+ return Quaternion.from_array(self.array[StateSE3Index.QUATERNION])
- def __iter__(self) -> Iterable[float]:
- """Iterator over the state coordinates (x, y, z, roll, pitch, yaw)."""
- return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
-
- def __hash__(self) -> int:
- """Hash method"""
- return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
-
- def __matmul__(self, other: StateSE3) -> StateSE3:
- """Combines two SE3 states by applying the transformation of the other state to this state.
+ @property
+ def rotation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 3x3 rotation matrix representation of the state's orientation.
- :param other: Another StateSE3 instance representing the transformation to apply.
- :return: A new StateSE3 instance representing the combined transformation.
+ :return: A 3x3 numpy array representing the rotation matrix.
"""
- return StateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
-
+ return self.quaternion.rotation_matrix
-class QuaternionSE3(ArrayMixin):
- """Class representing a quaternion in SE3 space.
- TODO: Implement and replace StateSE3.
+class EulerStateSE3(ArrayMixin):
+ """
+    Class to represent a 3D pose as SE3 (x, y, z, roll, pitch, yaw).
+ TODO: Use quaternions for rotation representation.
"""
_array: npt.NDArray[np.float64]
- def __init__(self, x: float, y: float, z: float, qw: float, qx: float, qy: float, qz: float):
- """Initialize QuaternionSE3 with x, y, z, qw, qx, qy, qz coordinates."""
- array = np.zeros(len(QuaternionSE3Index), dtype=np.float64)
- array[QuaternionSE3Index.X] = x
- array[QuaternionSE3Index.Y] = y
- array[QuaternionSE3Index.Z] = z
- array[QuaternionSE3Index.QW] = qw
- array[QuaternionSE3Index.QX] = qx
- array[QuaternionSE3Index.QY] = qy
- array[QuaternionSE3Index.QZ] = qz
+ def __init__(self, x: float, y: float, z: float, roll: float, pitch: float, yaw: float):
+        """Initialize EulerStateSE3 with x, y, z, roll, pitch, yaw coordinates."""
+ array = np.zeros(len(EulerStateSE3Index), dtype=np.float64)
+ array[EulerStateSE3Index.X] = x
+ array[EulerStateSE3Index.Y] = y
+ array[EulerStateSE3Index.Z] = z
+ array[EulerStateSE3Index.ROLL] = roll
+ array[EulerStateSE3Index.PITCH] = pitch
+ array[EulerStateSE3Index.YAW] = yaw
object.__setattr__(self, "_array", array)
@classmethod
- def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> QuaternionSE3:
- """Constructs a QuaternionSE3 from a numpy array.
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerStateSE3:
+        """Constructs an EulerStateSE3 from a numpy array.
- :param array: Array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+ :param array: Array of shape (6,) representing the state [x, y, z, roll, pitch, yaw], indexed by \
+            :class:`~d123.geometry.geometry_index.EulerStateSE3Index`.
:param copy: Whether to copy the input array. Defaults to True.
- :return: A QuaternionSE3 instance.
+        :return: An EulerStateSE3 instance.
"""
assert array.ndim == 1
- assert array.shape[0] == len(QuaternionSE3Index)
+ assert array.shape[0] == len(EulerStateSE3Index)
instance = object.__new__(cls)
object.__setattr__(instance, "_array", array.copy() if copy else array)
return instance
+ @classmethod
+ def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> EulerStateSE3:
+        """Constructs an EulerStateSE3 from a 4x4 transformation matrix.
+
+ :param array: A 4x4 numpy array representing the transformation matrix.
+        :return: An EulerStateSE3 instance.
+ """
+ assert array.ndim == 2
+ assert array.shape == (4, 4)
+ translation = array[:3, 3]
+ rotation = array[:3, :3]
+ roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation)
+
+ return EulerStateSE3(
+ x=translation[Point3DIndex.X],
+ y=translation[Point3DIndex.Y],
+ z=translation[Point3DIndex.Z],
+ roll=roll,
+ pitch=pitch,
+ yaw=yaw,
+ )
+
@property
def x(self) -> float:
- """Returns the x-coordinate of the quaternion.
+ """Returns the x-coordinate of the 3D state.
:return: The x-coordinate.
"""
- return self._array[QuaternionSE3Index.X]
+ return self._array[EulerStateSE3Index.X]
@property
def y(self) -> float:
- """Returns the y-coordinate of the quaternion.
+ """Returns the y-coordinate of the 3D state.
:return: The y-coordinate.
"""
- return self._array[QuaternionSE3Index.Y]
+ return self._array[EulerStateSE3Index.Y]
@property
def z(self) -> float:
- """Returns the z-coordinate of the quaternion.
+ """Returns the z-coordinate of the 3D state.
:return: The z-coordinate.
"""
- return self._array[QuaternionSE3Index.Z]
+ return self._array[EulerStateSE3Index.Z]
@property
- def qw(self) -> float:
- """Returns the w-coordinate of the quaternion.
-
- :return: The w-coordinate.
- """
- return self._array[QuaternionSE3Index.QW]
-
- @property
- def qx(self) -> float:
- """Returns the x-coordinate of the quaternion.
+ def roll(self) -> float:
+ """Returns the roll (x-axis rotation) of the 3D state.
- :return: The x-coordinate.
+ :return: The roll angle.
"""
- return self._array[QuaternionSE3Index.QX]
+ return self._array[EulerStateSE3Index.ROLL]
@property
- def qy(self) -> float:
- """Returns the y-coordinate of the quaternion.
+ def pitch(self) -> float:
+ """Returns the pitch (y-axis rotation) of the 3D state.
- :return: The y-coordinate.
+ :return: The pitch angle.
"""
- return self._array[QuaternionSE3Index.QY]
+ return self._array[EulerStateSE3Index.PITCH]
@property
- def qz(self) -> float:
- """Returns the z-coordinate of the quaternion.
+ def yaw(self) -> float:
+ """Returns the yaw (z-axis rotation) of the 3D state.
- :return: The z-coordinate.
+ :return: The yaw angle.
"""
- return self._array[QuaternionSE3Index.QZ]
+ return self._array[EulerStateSE3Index.YAW]
@property
def array(self) -> npt.NDArray[np.float64]:
- """Converts the QuaternionSE3 instance to a numpy array.
+ """Returns the StateSE3 instance as a numpy array.
- :return: A numpy array of shape (7,), indexed by :class:`~d123.geometry.geometry_index.QuaternionSE3Index`.
+        :return: A numpy array of shape (6,), indexed by \
+            :class:`~d123.geometry.geometry_index.EulerStateSE3Index`.
"""
return self._array
@property
def state_se2(self) -> StateSE2:
- """Returns the quaternion state as a 2D state by ignoring the z-axis.
+ """Returns the 3D state as a 2D state by ignoring the z-axis.
:return: A StateSE2 instance representing the 2D projection of the 3D state.
"""
- # Convert quaternion to yaw angle
- yaw = self.quaternion.euler_angles.yaw
- return StateSE2(self.x, self.y, yaw)
+ return StateSE2(self.x, self.y, self.yaw)
@property
def point_3d(self) -> Point3D:
@@ -443,18 +399,54 @@ def shapely_point(self) -> geom.Point:
"""
return self.point_3d.shapely_point
- @cached_property
- def quaternion(self) -> Quaternion:
- """Returns the quaternion (w, x, y, z) representation of the state's orientation.
-
- :return: A Quaternion instance representing the quaternion.
- """
- return Quaternion.from_array(self.array[QuaternionSE3Index.QUATERNION])
-
@property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the state's orientation.
:return: A 3x3 numpy array representing the rotation matrix.
"""
- return self.quaternion.rotation_matrix
+ return self.euler_angles.rotation_matrix
+
+ @property
+ def transformation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 4x4 transformation matrix representation of the state.
+
+ :return: A 4x4 numpy array representing the transformation matrix.
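+
+        Example (illustrative sketch added in review, not part of the original
+        patch; the matrix should round-trip through
+        :meth:`from_transformation_matrix` for angles in the principal range):
+
+        >>> pose = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
+        >>> recovered = EulerStateSE3.from_transformation_matrix(pose.transformation_matrix)
+        >>> bool(np.allclose(pose.array, recovered.array))
+        True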
+ """
+ rotation_matrix = self.rotation_matrix
+ transformation_matrix = np.eye(4, dtype=np.float64)
+ transformation_matrix[:3, :3] = rotation_matrix
+ transformation_matrix[:3, 3] = self.array[EulerStateSE3Index.XYZ]
+ return transformation_matrix
+
+ @cached_property
+    def euler_angles(self) -> EulerAngles:
+        """Returns the Euler angles (roll, pitch, yaw) representation of the state's orientation.
+
+        :return: An EulerAngles instance.
+        """
+        return EulerAngles.from_array(self.array[EulerStateSE3Index.EULER_ANGLES])
+
+ @property
+    def quaternion_se3(self) -> StateSE3:
+        """Converts this Euler-angle state to a quaternion-based StateSE3.
+
+        :return: A StateSE3 instance with the same position and orientation.
+        """
+        quaternion_se3_array = np.zeros(len(StateSE3Index), dtype=np.float64)
+ quaternion_se3_array[StateSE3Index.XYZ] = self.array[EulerStateSE3Index.XYZ]
+ quaternion_se3_array[StateSE3Index.QUATERNION] = Quaternion.from_euler_angles(self.euler_angles)
+
+ return StateSE3.from_array(quaternion_se3_array)
+
+ @property
+    def quaternion(self) -> Quaternion:
+        """Returns the quaternion (w, x, y, z) representation of the state's orientation.
+
+        :return: A Quaternion instance.
+        """
+        return Quaternion.from_euler_angles(self.euler_angles)
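+
+    # Illustrative conversion sketch (added in review, not part of the
+    # original patch; both properties reuse the cached ``euler_angles``):
+    #   pose = EulerStateSE3(1.0, 2.0, 3.0, 0.0, 0.0, np.pi / 2)
+    #   q_state = pose.quaternion_se3  # same position, quaternion orientation
+    #   q = pose.quaternion            # orientation only, as a Quaternion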
+
+ def __iter__(self) -> Iterable[float]:
+ """Iterator over the state coordinates (x, y, z, roll, pitch, yaw)."""
+ return iter((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
+
+ def __hash__(self) -> int:
+ """Hash method"""
+ return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
+
+ def __matmul__(self, other: EulerStateSE3) -> EulerStateSE3:
+ """Combines two SE3 states by applying the transformation of the other state to this state.
+
+ :param other: Another StateSE3 instance representing the transformation to apply.
+ :return: A new StateSE3 instance representing the combined transformation.
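+
+        Example (illustrative sketch added in review, not part of the original
+        patch; composing with the identity pose should leave a state unchanged,
+        up to floating-point error in the Euler-angle round trip):
+
+        >>> identity = EulerStateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+        >>> pose = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
+        >>> bool(np.allclose((pose @ identity).array, pose.array))
+        True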
+ """
+ return EulerStateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py
index 545c511c..06522d7a 100644
--- a/d123/geometry/test/test_bounding_box.py
+++ b/d123/geometry/test/test_bounding_box.py
@@ -4,7 +4,7 @@
import shapely.geometry as geom
from d123.common.utils.mixin import ArrayMixin
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, EulerStateSE3
from d123.geometry.geometry_index import (
BoundingBoxSE2Index,
BoundingBoxSE3Index,
@@ -109,7 +109,7 @@ class TestBoundingBoxSE3(unittest.TestCase):
def setUp(self):
"""Set up test fixtures."""
- self.center = StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
+ self.center = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
self.length = 4.0
self.width = 2.0
self.height = 1.5
@@ -204,7 +204,7 @@ def test_array_assertions(self):
def test_zero_dimensions(self):
"""Test bounding box with zero dimensions."""
- center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ center = EulerStateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0)
self.assertEqual(bbox.length, 0.0)
self.assertEqual(bbox.width, 0.0)
diff --git a/d123/geometry/test/test_transform.py b/d123/geometry/test/test_transform.py
deleted file mode 100644
index 8c033803..00000000
--- a/d123/geometry/test/test_transform.py
+++ /dev/null
@@ -1,976 +0,0 @@
-import unittest
-
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry.geometry_index import EulerAnglesIndex, Point2DIndex, Point3DIndex, StateSE2Index, StateSE3Index
-from d123.geometry.se import StateSE2, StateSE3
-from d123.geometry.transform.transform_se2 import (
- convert_absolute_to_relative_point_2d_array,
- convert_absolute_to_relative_se2_array,
- convert_relative_to_absolute_point_2d_array,
- convert_relative_to_absolute_se2_array,
- translate_se2_along_body_frame,
- translate_se2_along_x,
- translate_se2_along_y,
- translate_se2_array_along_body_frame,
-)
-from d123.geometry.transform.transform_se3 import (
- convert_absolute_to_relative_points_3d_array,
- convert_absolute_to_relative_se3_array,
- convert_absolute_to_relative_se3_array,
- convert_relative_to_absolute_points_3d_array,
- convert_relative_to_absolute_se3_array,
- translate_se3_along_body_frame,
- translate_se3_along_x,
- translate_se3_along_y,
- translate_se3_along_z,
-)
-from d123.geometry.vector import Vector2D, Vector3D
-
-
-class TestTransformSE2(unittest.TestCase):
-
- def setUp(self):
- self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
-
- def test_translate_se2_along_x(self) -> None:
- """Tests translating a SE2 state along the X-axis."""
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 1.0
- result: StateSE2 = translate_se2_along_x(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_x_negative(self) -> None:
- """Tests translating a SE2 state along the X-axis in the negative direction."""
- pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
- distance: float = -0.5
- result: StateSE2 = translate_se2_along_x(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_x_with_rotation(self) -> None:
- """Tests translating a SE2 state along the X-axis with 90 degree rotation."""
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
- distance: float = 1.0
- result: StateSE2 = translate_se2_along_x(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_y(self) -> None:
- """Tests translating a SE2 state along the Y-axis."""
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 1.0
- result: StateSE2 = translate_se2_along_y(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_y_negative(self) -> None:
- """Tests translating a SE2 state along the Y-axis in the negative direction."""
- pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
- distance: float = -1.5
- result: StateSE2 = translate_se2_along_y(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_y_with_rotation(self) -> None:
- """Tests translating a SE2 state along the Y-axis with -90 degree rotation."""
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64))
- distance: float = 2.0
- result: StateSE2 = translate_se2_along_y(pose, distance)
- expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_body_frame_forward(self) -> None:
- """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation."""
- # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
- vector: Vector2D = Vector2D(1.0, 0.0)
- result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_body_frame_backward(self) -> None:
- """Tests translating a SE2 state along the body frame backward direction."""
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- vector: Vector2D = Vector2D(-1.0, 0.0)
- result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_body_frame_diagonal(self) -> None:
- """Tests translating a SE2 state along the body frame diagonal direction."""
- pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
- vector: Vector2D = Vector2D(1.0, 0.0)
- result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(
- np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
- )
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_body_frame_lateral(self) -> None:
- """Tests translating a SE2 state along the body frame lateral direction."""
- # Move 1 unit to the right (positive y in body frame)
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- vector: Vector2D = Vector2D(0.0, 1.0)
- result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None:
- """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation."""
- # Move 1 unit to the right when facing 90 degrees
- pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
- vector: Vector2D = Vector2D(0.0, 1.0)
- result: StateSE2 = translate_se2_along_body_frame(pose, vector)
- expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se2_array_along_body_frame_single_distance(self) -> None:
- """Tests translating a SE2 state array along the body frame forward direction."""
- poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
- distance: Vector2D = Vector2D(1.0, 0.0)
- result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_translate_se2_array_along_body_frame_multiple_distances(self) -> None:
- """Tests translating a SE2 state array along the body frame forward direction with different distances."""
- poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
- distance: Vector2D = Vector2D(2.0, 0.0)
- result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
- expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_translate_se2_array_along_body_frame_lateral(self) -> None:
- """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation."""
- poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
- distance: Vector2D = Vector2D(0.0, 1.0)
- result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
- expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_se2_array(self) -> None:
- """Tests converting absolute SE2 poses to relative SE2 poses."""
- origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None:
- """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation."""
- reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_se2_array_identity(self) -> None:
- """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation."""
- reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_se2_array(self) -> None:
- """Tests converting relative SE2 poses to absolute SE2 poses."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
- relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
- expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None:
- """Tests converting relative SE2 poses to absolute SE2 poses with rotation."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
- relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_point_2d_array(self) -> None:
- """Tests converting absolute 2D points to relative 2D points."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None:
- """Tests converting absolute 2D points to relative 2D points with 90 degree rotation."""
- reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None:
- """Tests converting an empty array of absolute 2D points to relative 2D points."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_point_2d_array(self) -> None:
- """Tests converting relative 2D points to absolute 2D points."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
- relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
- expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None:
- """Tests converting relative 2D points to absolute 2D points with 90 degree rotation."""
- reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
- relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-
-class TestTransformSE3(unittest.TestCase):
-
- def setUp(self):
- self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
- self.num_consistency_tests = 10 # Number of random test cases for consistency checks
-
- def test_translate_se3_along_x(self) -> None:
- """Tests translating a SE3 state along the body frame forward direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 1.0
- result: StateSE3 = translate_se3_along_x(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_x_negative(self) -> None:
- """Tests translating a SE3 state along the body frame backward direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = -0.5
- result: StateSE3 = translate_se3_along_x(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_x_with_rotation(self) -> None:
- """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- distance: float = 2.5
- result: StateSE3 = translate_se3_along_x(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([0.0, 2.5, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_y(self) -> None:
- """Tests translating a SE3 state along the body frame lateral direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 1.0
- result: StateSE3 = translate_se3_along_y(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_y_with_existing_position(self) -> None:
- """Tests translating a SE3 state along the body frame lateral direction with existing position."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 2.5
- result: StateSE3 = translate_se3_along_y(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_y_negative(self) -> None:
- """Tests translating a SE3 state along the body frame lateral direction in the negative direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = -1.0
- result: StateSE3 = translate_se3_along_y(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_y_with_rotation(self) -> None:
- """Tests translating a SE3 state along the body frame lateral direction with roll rotation."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
- distance: float = -1.0
- result: StateSE3 = translate_se3_along_y(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_z(self) -> None:
- """Tests translating a SE3 state along the body frame vertical direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 1.0
- result: StateSE3 = translate_se3_along_z(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_z_large_distance(self) -> None:
- """Tests translating a SE3 state along the body frame vertical direction with a large distance."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = 10.0
- result: StateSE3 = translate_se3_along_z(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_z_negative(self) -> None:
- """Tests translating a SE3 state along the body frame vertical direction in the negative direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
- distance: float = -2.0
- result: StateSE3 = translate_se3_along_z(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_z_with_rotation(self) -> None:
- """Tests translating a SE3 state along the body frame vertical direction with pitch rotation."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
- distance: float = 2.0
- result: StateSE3 = translate_se3_along_z(pose, distance)
- expected: StateSE3 = StateSE3.from_array(np.array([3.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_body_frame(self) -> None:
- """Tests translating a SE3 state along the body frame forward direction."""
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
- result: StateSE3 = translate_se3_along_body_frame(pose, translation)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_body_frame_multiple_axes(self) -> None:
- """Tests translating a SE3 state along the body frame in multiple axes."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
- result: StateSE3 = translate_se3_along_body_frame(pose, translation)
- expected: StateSE3 = StateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_body_frame_zero_translation(self) -> None:
- """Tests translating a SE3 state along the body frame with zero translation."""
- pose: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- result: StateSE3 = translate_se3_along_body_frame(pose, translation)
- expected: StateSE3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array)
-
- def test_translate_se3_along_body_frame_with_rotation(self) -> None:
- """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
- # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis
- pose: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
- result: StateSE3 = translate_se3_along_body_frame(pose, translation)
- # Should move in +Y direction in world frame
- expected: StateSE3 = StateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
-
- def test_translate_se3_along_body_frame_consistency(self) -> None:
- """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions."""
-
- for _ in range(self.num_consistency_tests):
- # Generate random parameters
- x_distance: float = np.random.uniform(-10.0, 10.0)
- y_distance: float = np.random.uniform(-10.0, 10.0)
- z_distance: float = np.random.uniform(-10.0, 10.0)
-
- start_x: float = np.random.uniform(-5.0, 5.0)
- start_y: float = np.random.uniform(-5.0, 5.0)
- start_z: float = np.random.uniform(-5.0, 5.0)
-
- start_roll: float = np.random.uniform(-np.pi, np.pi)
- start_pitch: float = np.random.uniform(-np.pi, np.pi)
- start_yaw: float = np.random.uniform(-np.pi, np.pi)
-
- original_pose: StateSE3 = StateSE3.from_array(
- np.array(
- [
- start_x,
- start_y,
- start_z,
- start_roll,
- start_pitch,
- start_yaw,
- ],
- dtype=np.float64,
- )
- )
-
- # x-axis translation
- translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64))
- result_body_frame_x: StateSE3 = translate_se3_along_body_frame(original_pose, translation_x)
- result_axis_x: StateSE3 = translate_se3_along_x(original_pose, x_distance)
- np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal)
-
- # y-axis translation
- translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64))
- result_body_frame_y: StateSE3 = translate_se3_along_body_frame(original_pose, translation_y)
- result_axis_y: StateSE3 = translate_se3_along_y(original_pose, y_distance)
- np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal)
-
- # z-axis translation
- translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64))
- result_body_frame_z: StateSE3 = translate_se3_along_body_frame(original_pose, translation_z)
- result_axis_z: StateSE3 = translate_se3_along_z(original_pose, z_distance)
- np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal)
-
- # all axes translation
- translation_all: Vector3D = Vector3D.from_array(
- np.array([x_distance, y_distance, z_distance], dtype=np.float64)
- )
- result_body_frame_all: StateSE3 = translate_se3_along_body_frame(original_pose, translation_all)
- intermediate_pose: StateSE3 = translate_se3_along_x(original_pose, x_distance)
- intermediate_pose = translate_se3_along_y(intermediate_pose, y_distance)
- result_axis_all: StateSE3 = translate_se3_along_z(intermediate_pose, z_distance)
- np.testing.assert_array_almost_equal(
- result_body_frame_all.array, result_axis_all.array, decimal=self.decimal
- )
-
- def test_convert_absolute_to_relative_se3_array(self) -> None:
- """Tests converting absolute SE3 poses to relative SE3 poses."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array(
- [
- [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
- [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
- ],
- dtype=np.float64,
- )
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array(
- [
- [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
- [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
- ],
- dtype=np.float64,
- )
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
- """Tests converting a single absolute SE3 pose to a relative SE3 pose."""
- reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None:
- """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw rotation."""
- reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_se3_array(reference, absolute_poses)
- expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_se3_array(self) -> None:
- """Tests converting relative SE3 poses to absolute SE3 poses."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- relative_poses: npt.NDArray[np.float64] = np.array(
- [
- [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
- [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
- ],
- dtype=np.float64,
- )
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
- expected: npt.NDArray[np.float64] = np.array(
- [
- [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
- [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
- ],
- dtype=np.float64,
- )
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None:
- """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_se3_array(reference, relative_poses)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_absolute_to_relative_points_3d_array(self) -> None:
- """Tests converting absolute 3D points to relative 3D points."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
- """Tests converting absolute 3D points to relative 3D points with origin reference."""
- reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None:
- """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation."""
- reference: StateSE3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
- expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
- def test_convert_relative_to_absolute_points_3d_array(self) -> None:
- """Tests converting relative 3D points to absolute 3D points."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
- expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
- """Tests converting an empty array of relative 3D points to absolute 3D points."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
- relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
- expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
- np.testing.assert_array_almost_equal(result, expected)
-
- def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None:
- """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation."""
- reference: StateSE3 = StateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
- relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
- result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
- expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
- np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
-
-
-class TestTransformConsistency(unittest.TestCase):
- """Tests to ensure consistency between different transformation functions."""
-
- def setUp(self):
- self.decimal = 4 # Decimal places for np.testing.assert_array_almost_equal
- self.num_consistency_tests = 10 # Number of random test cases for consistency checks
-
- self.max_pose_xyz = 100.0
- self.min_random_poses = 1
- self.max_random_poses = 20
-
- def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
- """Generate a random SE2 pose"""
- random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
- random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size) # yaw angles
- return random_se2_array
-
- def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
- """Generate a random SE3 poses"""
- random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
- random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
- random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
- random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
- random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
-
- return random_se3_array
-
- def test_se2_absolute_relative_conversion_consistency(self) -> None:
- """Test that converting absolute->relative->absolute returns original poses"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
- # Generate random absolute poses
- num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_poses = self._get_random_se2_array(num_poses)
-
- # Convert absolute -> relative -> absolute
- relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses)
- recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses)
-
- np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
-
- def test_se2_points_absolute_relative_conversion_consistency(self) -> None:
- """Test that converting absolute->relative->absolute returns original points"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
- # Generate random absolute points
- num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY]
-
- # Convert absolute -> relative -> absolute
- relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
- recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points)
-
- np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
-
- def test_se2_points_consistency(self) -> None:
- """Test whether SE2 point and pose conversions are consistent"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
- # Generate random absolute points
- num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_se2 = self._get_random_se2_array(num_poses)
-
- # Convert absolute -> relative -> absolute
- relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2)
- relative_points = convert_absolute_to_relative_point_2d_array(
- reference, absolute_se2[..., StateSE2Index.XY]
- )
- np.testing.assert_array_almost_equal(
- relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal
- )
-
- recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2)
- absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points)
- np.testing.assert_array_almost_equal(
- recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
- )
-
- def test_se2_translation_consistency(self) -> None:
- """Test that SE2 translations are consistent between different methods"""
- for _ in range(self.num_consistency_tests):
- # Generate random pose
- pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
-
- # Generate random distances
- dx = np.random.uniform(-10.0, 10.0)
- dy = np.random.uniform(-10.0, 10.0)
-
- # Test x-translation consistency
- result_x_direct = translate_se2_along_x(pose, dx)
- result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
- np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
-
- # Test y-translation consistency
- result_y_direct = translate_se2_along_y(pose, dy)
- result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
- np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
-
- # Test combined translation
- result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
- result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
- np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
-
- def test_se3_absolute_relative_conversion_consistency(self) -> None:
- """Test that converting absolute->relative->absolute returns original poses"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
- # Generate random absolute poses
- num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_poses = self._get_random_se3_array(num_poses)
-
- # Convert absolute -> relative -> absolute
- relative_poses = convert_absolute_to_relative_se3_array(reference, absolute_poses)
- recovered_absolute = convert_relative_to_absolute_se3_array(reference, relative_poses)
-
- np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
-
- def test_se3_points_absolute_relative_conversion_consistency(self) -> None:
- """Test that converting absolute->relative->absolute returns original points"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
- # Generate random absolute points
- num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_points = self._get_random_se3_array(num_points)[:, StateSE3Index.XYZ]
-
- # Convert absolute -> relative -> absolute
- relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
- recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points)
-
- np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
-
- def test_se3_points_consistency(self) -> None:
- """Test whether SE3 point and pose conversions are consistent"""
- for _ in range(self.num_consistency_tests):
- # Generate random reference pose
- reference = StateSE3.from_array(self._get_random_se3_array(1)[0])
-
- # Generate random absolute points
- num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
- absolute_se3 = self._get_random_se3_array(num_poses)
-
- # Convert absolute -> relative -> absolute
- relative_se3 = convert_absolute_to_relative_se3_array(reference, absolute_se3)
- relative_points = convert_absolute_to_relative_points_3d_array(
- reference, absolute_se3[..., StateSE3Index.XYZ]
- )
- np.testing.assert_array_almost_equal(
- relative_se3[..., StateSE3Index.XYZ], relative_points, decimal=self.decimal
- )
-
- recovered_absolute_se3 = convert_relative_to_absolute_se3_array(reference, relative_se3)
- absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points)
- np.testing.assert_array_almost_equal(
- recovered_absolute_se3[..., StateSE3Index.XYZ], absolute_points, decimal=self.decimal
- )
-
- def test_se2_se3_translation_along_body_consistency(self) -> None:
- """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation"""
- for _ in range(self.num_consistency_tests):
- # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw)
-
- pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0])
- pose_se3 = StateSE3.from_array(
- np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64)
- )
-
- # Test translation along x-axis
- dx = np.random.uniform(-5.0, 5.0)
- translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0))
- translated_se3_x = translate_se3_along_x(pose_se3, dx)
-
- np.testing.assert_array_almost_equal(
- translated_se2_x.array[StateSE2Index.XY], translated_se3_x.array[StateSE3Index.XY], decimal=self.decimal
- )
- np.testing.assert_almost_equal(
- translated_se2_x.array[StateSE2Index.YAW],
- translated_se3_x.array[StateSE3Index.YAW],
- decimal=self.decimal,
- )
-
- # Test translation along y-axis
- dy = np.random.uniform(-5.0, 5.0)
- translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy))
- translated_se3_y = translate_se3_along_y(pose_se3, dy)
-
- np.testing.assert_array_almost_equal(
- translated_se2_y.array[StateSE2Index.XY], translated_se3_y.array[StateSE3Index.XY], decimal=self.decimal
- )
- np.testing.assert_almost_equal(
- translated_se2_y.array[StateSE2Index.YAW],
- translated_se3_y.array[StateSE3Index.YAW],
- decimal=self.decimal,
- )
-
- # Test translation along x- and y-axis
- dx = np.random.uniform(-5.0, 5.0)
- dy = np.random.uniform(-5.0, 5.0)
- translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy))
- translated_se3_xy = translate_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0))
- np.testing.assert_array_almost_equal(
- translated_se2_xy.array[StateSE2Index.XY],
- translated_se3_xy.array[StateSE3Index.XY],
- decimal=self.decimal,
- )
- np.testing.assert_almost_equal(
- translated_se2_xy.array[StateSE2Index.YAW],
- translated_se3_xy.array[StateSE3Index.YAW],
- decimal=self.decimal,
- )
-
- def test_se2_se3_point_conversion_consistency(self) -> None:
- """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
- for _ in range(self.num_consistency_tests):
- # Create equivalent SE2 and SE3 reference poses
- x = np.random.uniform(-10.0, 10.0)
- y = np.random.uniform(-10.0, 10.0)
- yaw = np.random.uniform(-np.pi, np.pi)
-
- reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
- reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
-
- # Generate 2D points and embed them in 3D with z=0
- num_points = np.random.randint(1, 8)
- points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
- points_3d = np.column_stack([points_2d, np.zeros(num_points)])
-
- # Convert using SE2 functions
- relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
- absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
-
- # Convert using SE3 functions
- relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
- absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
-
- # Check that SE2 and SE3 conversions are consistent for the x,y components
- np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
- np.testing.assert_array_almost_equal(
- absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
- )
-
- # Check that z-components remain zero
- np.testing.assert_array_almost_equal(
- relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
- )
- np.testing.assert_array_almost_equal(
- absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
- )
-
- def test_se2_se3_pose_conversion_consistency(self) -> None:
- """Test that SE2 and SE3 pose conversions are consistent for 2D points embedded in 3D"""
- for _ in range(self.num_consistency_tests):
- # Create equivalent SE2 and SE3 reference poses
- x = np.random.uniform(-10.0, 10.0)
- y = np.random.uniform(-10.0, 10.0)
- yaw = np.random.uniform(-np.pi, np.pi)
-
- reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
- reference_se3 = StateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
-
- # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch
- num_poses = np.random.randint(1, 8)
- pose_2d = self._get_random_se2_array(num_poses)
- pose_3d = np.zeros((num_poses, len(StateSE3Index)), dtype=np.float64)
- pose_3d[:, StateSE3Index.XY] = pose_2d[:, StateSE2Index.XY]
- pose_3d[:, StateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW]
-
- # Convert using SE2 functions
- relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d)
- absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
-
- # Convert using SE3 functions
- relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, pose_3d)
- absolute_se3_recovered = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
-
- # Check that SE2 and SE3 conversions are consistent for the x,y components
- np.testing.assert_array_almost_equal(
- relative_se2[:, StateSE2Index.XY], relative_se3[:, StateSE3Index.XY], decimal=self.decimal
- )
- np.testing.assert_array_almost_equal(
- absolute_se2_recovered[:, StateSE2Index.XY],
- absolute_se3_recovered[:, StateSE3Index.XY],
- decimal=self.decimal,
- )
- # Check that SE2 and SE3 conversions are consistent for the yaw component
- np.testing.assert_array_almost_equal(
- relative_se2[:, StateSE2Index.YAW], relative_se3[:, StateSE3Index.YAW], decimal=self.decimal
- )
- np.testing.assert_array_almost_equal(
- absolute_se2_recovered[:, StateSE2Index.YAW],
- absolute_se3_recovered[:, StateSE3Index.YAW],
- decimal=self.decimal,
- )
-
- # Check that z-components remain zero
- np.testing.assert_array_almost_equal(
- relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
- )
- np.testing.assert_array_almost_equal(
- absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
- )
-
- def test_se2_array_translation_consistency(self) -> None:
- """Test that SE2 array translation is consistent with single pose translation"""
- for _ in range(self.num_consistency_tests):
- # Generate random poses
- num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
- poses_array = self._get_random_se2_array(num_poses)
-
- # Generate random translation
- dx = np.random.uniform(-5.0, 5.0)
- dy = np.random.uniform(-5.0, 5.0)
- translation = Vector2D(dx, dy)
-
- # Translate using array function
- result_array = translate_se2_array_along_body_frame(poses_array, translation)
-
- # Translate each pose individually
- result_individual = np.zeros_like(poses_array)
- for i in range(num_poses):
- pose = StateSE2.from_array(poses_array[i])
- translated = translate_se2_along_body_frame(pose, translation)
- result_individual[i] = translated.array
-
- np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
-
- # def test_transform_empty_arrays(self) -> None:
- # """Test that transform functions handle empty arrays correctly"""
- # reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
- # reference_se3 = StateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
-
- # # Test SE2 empty arrays
- # empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
- # empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
-
- # result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
- # result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
-
- # self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
- # self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
-
- # # Test SE3 empty arrays
- # empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE3Index))
- # empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
-
- # result_se3_poses = convert_absolute_to_relative_se3_array(reference_se3, empty_se3_poses)
- # result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
-
- # self.assertEqual(result_se3_poses.shape, (0, len(StateSE3Index)))
- # self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
-
- def test_transform_identity_operations(self) -> None:
- """Test that transforms with identity reference frames work correctly"""
- # Identity SE2 pose
- identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
- identity_se3 = StateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
-
- for _ in range(self.num_consistency_tests):
- # Test SE2 identity transforms
- num_poses = np.random.randint(1, 10)
- se2_poses = self._get_random_se2_array(num_poses)
- se2_points = se2_poses[:, StateSE2Index.XY]
-
- relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
- relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
-
- np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
- np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
-
- # Test SE3 identity transforms
- se3_poses = self._get_random_se3_array(num_poses)
- se3_points = se3_poses[:, StateSE3Index.XYZ]
-
- relative_se3_poses = convert_absolute_to_relative_se3_array(identity_se3, se3_poses)
- relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
-
- np.testing.assert_array_almost_equal(
- se3_poses[..., StateSE3Index.EULER_ANGLES],
- relative_se3_poses[..., StateSE3Index.EULER_ANGLES],
- decimal=self.decimal,
- )
- np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
-
- def test_transform_large_rotations(self) -> None:
- """Test transforms with large rotation angles beyond [-π, π]"""
- for _ in range(self.num_consistency_tests):
- # Create poses with large rotation angles
- large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi)
- large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3)
-
- reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64))
- reference_se3 = StateSE3.from_array(
- np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64)
- )
-
- # Generate test poses/points
- test_se2_poses = self._get_random_se2_array(5)
- test_se3_poses = self._get_random_se3_array(5)
- test_2d_points = test_se2_poses[:, StateSE2Index.XY]
- test_3d_points = test_se3_poses[:, StateSE3Index.XYZ]
-
- # Test round-trip conversions should still work
- relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses)
- recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
-
- relative_se3 = convert_absolute_to_relative_se3_array(reference_se3, test_se3_poses)
- recovered_se3 = convert_relative_to_absolute_se3_array(reference_se3, relative_se3)
-
- relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points)
- recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points)
-
- relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points)
- recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points)
-
- # Check consistency (allowing for angle wrapping)
- np.testing.assert_array_almost_equal(
- test_se2_poses[:, StateSE2Index.XY],
- recovered_se2[:, StateSE2Index.XY],
- decimal=self.decimal,
- )
- np.testing.assert_array_almost_equal(
- test_se3_poses[:, StateSE3Index.XYZ],
- recovered_se3[:, StateSE3Index.XYZ],
- decimal=self.decimal,
- )
- np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal)
- np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/d123/geometry/transform/__init__.py b/d123/geometry/transform/__init__.py
index 96088028..e69de29b 100644
--- a/d123/geometry/transform/__init__.py
+++ b/d123/geometry/transform/__init__.py
@@ -1,15 +0,0 @@
-from d123.geometry.transform.transform_se2 import (
- convert_absolute_to_relative_se2_array,
- convert_relative_to_absolute_se2_array,
- translate_se2_along_body_frame,
- translate_se2_along_x,
- translate_se2_along_y,
-)
-from d123.geometry.transform.transform_se3 import (
- convert_absolute_to_relative_se3_array,
- convert_relative_to_absolute_se3_array,
- translate_se3_along_body_frame,
- translate_se3_along_x,
- translate_se3_along_y,
- translate_se3_along_z,
-)
diff --git a/d123/geometry/transform/test/__init__.py b/d123/geometry/transform/test/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py
new file mode 100644
index 00000000..b4f73ab1
--- /dev/null
+++ b/d123/geometry/transform/test/test_transform_consistency.py
@@ -0,0 +1,475 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index, EulerStateSE3Index
+from d123.geometry.se import StateSE2, EulerStateSE3
+from d123.geometry.transform.transform_se2 import (
+ convert_absolute_to_relative_point_2d_array,
+ convert_absolute_to_relative_se2_array,
+ convert_relative_to_absolute_point_2d_array,
+ convert_relative_to_absolute_se2_array,
+ translate_se2_along_body_frame,
+ translate_se2_along_x,
+ translate_se2_along_y,
+ translate_se2_array_along_body_frame,
+)
+from d123.geometry.transform.transform_euler_se3 import (
+ convert_absolute_to_relative_points_3d_array,
+ convert_absolute_to_relative_euler_se3_array,
+ convert_relative_to_absolute_points_3d_array,
+ convert_relative_to_absolute_euler_se3_array,
+ translate_euler_se3_along_body_frame,
+ translate_euler_se3_along_x,
+ translate_euler_se3_along_y,
+)
+from d123.geometry.vector import Vector2D, Vector3D
+
+
+class TestTransformConsistency(unittest.TestCase):
+ """Tests to ensure consistency between different transformation functions."""
+
+ def setUp(self):
+ self.decimal = 4 # Decimal places for np.testing.assert_array_almost_equal
+ self.num_consistency_tests = 10 # Number of random test cases for consistency checks
+
+ self.max_pose_xyz = 100.0
+ self.min_random_poses = 1
+ self.max_random_poses = 20
+
+ def _get_random_se2_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate a random SE2 pose"""
+ random_se2_array = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, len(StateSE2Index)))
+ random_se2_array[:, StateSE2Index.YAW] = np.random.uniform(-np.pi, np.pi, size) # yaw angles
+ return random_se2_array
+
+ def _get_random_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate a random SE3 poses"""
+ random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64)
+ random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform(
+ -self.max_pose_xyz, self.max_pose_xyz, (size, 3)
+ )
+ random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
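+        # Keep pitch within (-π/2, π/2) so the Euler-angle representation stays unambiguous (avoids gimbal lock).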
+ random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+ random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
+ return random_se3_array
+
+ def test_se2_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original poses"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_poses = self._get_random_se2_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_poses = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ recovered_absolute = convert_relative_to_absolute_se2_array(reference, relative_poses)
+
+ np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+ def test_se2_points_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original points"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random absolute points
+ num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_points = self._get_random_se2_array(num_points)[:, StateSE2Index.XY]
+
+ # Convert absolute -> relative -> absolute
+ relative_points = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ recovered_absolute = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+
+ np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+ def test_se2_points_consistency(self) -> None:
+ """Test whether SE2 point and pose conversions are consistent"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+            # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_se2 = self._get_random_se2_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_se2 = convert_absolute_to_relative_se2_array(reference, absolute_se2)
+ relative_points = convert_absolute_to_relative_point_2d_array(
+ reference, absolute_se2[..., StateSE2Index.XY]
+ )
+ np.testing.assert_array_almost_equal(
+ relative_se2[..., StateSE2Index.XY], relative_points, decimal=self.decimal
+ )
+
+ recovered_absolute_se2 = convert_relative_to_absolute_se2_array(reference, relative_se2)
+ absolute_points = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+ np.testing.assert_array_almost_equal(
+ recovered_absolute_se2[..., StateSE2Index.XY], absolute_points, decimal=self.decimal
+ )
+
+ def test_se2_translation_consistency(self) -> None:
+ """Test that SE2 translations are consistent between different methods"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random pose
+ pose = StateSE2.from_array(self._get_random_se2_array(1)[0])
+
+ # Generate random distances
+ dx = np.random.uniform(-10.0, 10.0)
+ dy = np.random.uniform(-10.0, 10.0)
+
+ # Test x-translation consistency
+ result_x_direct = translate_se2_along_x(pose, dx)
+ result_x_body = translate_se2_along_body_frame(pose, Vector2D(dx, 0.0))
+ np.testing.assert_array_almost_equal(result_x_direct.array, result_x_body.array, decimal=self.decimal)
+
+ # Test y-translation consistency
+ result_y_direct = translate_se2_along_y(pose, dy)
+ result_y_body = translate_se2_along_body_frame(pose, Vector2D(0.0, dy))
+ np.testing.assert_array_almost_equal(result_y_direct.array, result_y_body.array, decimal=self.decimal)
+
+ # Test combined translation
+ result_xy_body = translate_se2_along_body_frame(pose, Vector2D(dx, dy))
+ result_xy_sequential = translate_se2_along_y(translate_se2_along_x(pose, dx), dy)
+ np.testing.assert_array_almost_equal(result_xy_body.array, result_xy_sequential.array, decimal=self.decimal)
+
+ def test_se3_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original poses"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+ # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_poses = self._get_random_se3_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_poses = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses)
+ recovered_absolute = convert_relative_to_absolute_euler_se3_array(reference, relative_poses)
+
+ np.testing.assert_array_almost_equal(absolute_poses, recovered_absolute, decimal=self.decimal)
+
+ def test_se3_points_absolute_relative_conversion_consistency(self) -> None:
+ """Test that converting absolute->relative->absolute returns original points"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+ # Generate random absolute points
+ num_points = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_points = self._get_random_se3_array(num_points)[:, EulerStateSE3Index.XYZ]
+
+ # Convert absolute -> relative -> absolute
+ relative_points = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ recovered_absolute = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+
+ np.testing.assert_array_almost_equal(absolute_points, recovered_absolute, decimal=self.decimal)
+
+ def test_se3_points_consistency(self) -> None:
+ """Test whether SE3 point and pose conversions are consistent"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random reference pose
+ reference = EulerStateSE3.from_array(self._get_random_se3_array(1)[0])
+
+            # Generate random absolute poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ absolute_se3 = self._get_random_se3_array(num_poses)
+
+ # Convert absolute -> relative -> absolute
+ relative_se3 = convert_absolute_to_relative_euler_se3_array(reference, absolute_se3)
+ relative_points = convert_absolute_to_relative_points_3d_array(
+ reference, absolute_se3[..., EulerStateSE3Index.XYZ]
+ )
+ np.testing.assert_array_almost_equal(
+ relative_se3[..., EulerStateSE3Index.XYZ], relative_points, decimal=self.decimal
+ )
+
+ recovered_absolute_se3 = convert_relative_to_absolute_euler_se3_array(reference, relative_se3)
+ absolute_points = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ np.testing.assert_array_almost_equal(
+ recovered_absolute_se3[..., EulerStateSE3Index.XYZ], absolute_points, decimal=self.decimal
+ )
+
+ def test_se2_se3_translation_along_body_consistency(self) -> None:
+ """Test that SE2 and SE3 translations are consistent when SE3 has no z-component or rotation"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 poses (SE3 with z=0 and no rotations except yaw)
+
+ pose_se2 = StateSE2.from_array(self._get_random_se2_array(1)[0])
+ pose_se3 = EulerStateSE3.from_array(
+ np.array([pose_se2.x, pose_se2.y, 0.0, 0.0, 0.0, pose_se2.yaw], dtype=np.float64)
+ )
+
+ # Test translation along x-axis
+ dx = np.random.uniform(-5.0, 5.0)
+ translated_se2_x = translate_se2_along_body_frame(pose_se2, Vector2D(dx, 0.0))
+ translated_se3_x = translate_euler_se3_along_x(pose_se3, dx)
+
+ np.testing.assert_array_almost_equal(
+ translated_se2_x.array[StateSE2Index.XY],
+ translated_se3_x.array[EulerStateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_x.array[StateSE2Index.YAW],
+ translated_se3_x.array[EulerStateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Test translation along y-axis
+ dy = np.random.uniform(-5.0, 5.0)
+ translated_se2_y = translate_se2_along_body_frame(pose_se2, Vector2D(0.0, dy))
+ translated_se3_y = translate_euler_se3_along_y(pose_se3, dy)
+
+ np.testing.assert_array_almost_equal(
+ translated_se2_y.array[StateSE2Index.XY],
+ translated_se3_y.array[EulerStateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_y.array[StateSE2Index.YAW],
+ translated_se3_y.array[EulerStateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Test translation along x- and y-axis
+ dx = np.random.uniform(-5.0, 5.0)
+ dy = np.random.uniform(-5.0, 5.0)
+ translated_se2_xy = translate_se2_along_body_frame(pose_se2, Vector2D(dx, dy))
+ translated_se3_xy = translate_euler_se3_along_body_frame(pose_se3, Vector3D(dx, dy, 0.0))
+ np.testing.assert_array_almost_equal(
+ translated_se2_xy.array[StateSE2Index.XY],
+ translated_se3_xy.array[EulerStateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_almost_equal(
+ translated_se2_xy.array[StateSE2Index.YAW],
+ translated_se3_xy.array[EulerStateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ def test_se2_se3_point_conversion_consistency(self) -> None:
+ """Test that SE2 and SE3 point conversions are consistent for 2D points embedded in 3D"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 reference poses
+ x = np.random.uniform(-10.0, 10.0)
+ y = np.random.uniform(-10.0, 10.0)
+ yaw = np.random.uniform(-np.pi, np.pi)
+
+ reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+ reference_se3 = EulerStateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+ # Generate 2D points and embed them in 3D with z=0
+ num_points = np.random.randint(1, 8)
+ points_2d = np.random.uniform(-20.0, 20.0, (num_points, len(Point2DIndex)))
+ points_3d = np.column_stack([points_2d, np.zeros(num_points)])
+
+ # Convert using SE2 functions
+ relative_2d = convert_absolute_to_relative_point_2d_array(reference_se2, points_2d)
+ absolute_2d_recovered = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d)
+
+ # Convert using SE3 functions
+ relative_3d = convert_absolute_to_relative_points_3d_array(reference_se3, points_3d)
+ absolute_3d_recovered = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d)
+
+ # Check that SE2 and SE3 conversions are consistent for the x,y components
+ np.testing.assert_array_almost_equal(relative_2d, relative_3d[:, Point3DIndex.XY], decimal=self.decimal)
+ np.testing.assert_array_almost_equal(
+ absolute_2d_recovered, absolute_3d_recovered[:, Point3DIndex.XY], decimal=self.decimal
+ )
+
+ # Check that z-components remain zero
+ np.testing.assert_array_almost_equal(
+ relative_3d[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_3d_recovered[:, Point3DIndex.Z], np.zeros(num_points), decimal=self.decimal
+ )
+
+ def test_se2_se3_pose_conversion_consistency(self) -> None:
+ """Test that SE2 and SE3 pose conversions are consistent for 2D points embedded in 3D"""
+ for _ in range(self.num_consistency_tests):
+ # Create equivalent SE2 and SE3 reference poses
+ x = np.random.uniform(-10.0, 10.0)
+ y = np.random.uniform(-10.0, 10.0)
+ yaw = np.random.uniform(-np.pi, np.pi)
+
+ reference_se2 = StateSE2.from_array(np.array([x, y, yaw], dtype=np.float64))
+ reference_se3 = EulerStateSE3.from_array(np.array([x, y, 0.0, 0.0, 0.0, yaw], dtype=np.float64))
+
+ # Generate 2D poses and embed them in 3D with z=0 and zero roll/pitch
+ num_poses = np.random.randint(1, 8)
+ pose_2d = self._get_random_se2_array(num_poses)
+ pose_3d = np.zeros((num_poses, len(EulerStateSE3Index)), dtype=np.float64)
+ pose_3d[:, EulerStateSE3Index.XY] = pose_2d[:, StateSE2Index.XY]
+ pose_3d[:, EulerStateSE3Index.YAW] = pose_2d[:, StateSE2Index.YAW]
+
+ # Convert using SE2 functions
+ relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, pose_2d)
+ absolute_se2_recovered = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+ # Convert using SE3 functions
+ relative_se3 = convert_absolute_to_relative_euler_se3_array(reference_se3, pose_3d)
+ absolute_se3_recovered = convert_relative_to_absolute_euler_se3_array(reference_se3, relative_se3)
+
+ # Check that SE2 and SE3 conversions are consistent for the x,y components
+ np.testing.assert_array_almost_equal(
+ relative_se2[:, StateSE2Index.XY], relative_se3[:, EulerStateSE3Index.XY], decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se2_recovered[:, StateSE2Index.XY],
+ absolute_se3_recovered[:, EulerStateSE3Index.XY],
+ decimal=self.decimal,
+ )
+ # Check that SE2 and SE3 conversions are consistent for the yaw component
+ np.testing.assert_array_almost_equal(
+ relative_se2[:, StateSE2Index.YAW], relative_se3[:, EulerStateSE3Index.YAW], decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se2_recovered[:, StateSE2Index.YAW],
+ absolute_se3_recovered[:, EulerStateSE3Index.YAW],
+ decimal=self.decimal,
+ )
+
+ # Check that z-components remain zero
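+            # NOTE: Point3DIndex.Z is used to index the pose arrays here, which assumes Point3DIndex and
+            # EulerStateSE3Index place the z-component in the same column.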
+ np.testing.assert_array_almost_equal(
+ relative_se3[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+ )
+ np.testing.assert_array_almost_equal(
+ absolute_se3_recovered[:, Point3DIndex.Z], np.zeros(num_poses), decimal=self.decimal
+ )
+
+ def test_se2_array_translation_consistency(self) -> None:
+ """Test that SE2 array translation is consistent with single pose translation"""
+ for _ in range(self.num_consistency_tests):
+ # Generate random poses
+ num_poses = np.random.randint(self.min_random_poses, self.max_random_poses)
+ poses_array = self._get_random_se2_array(num_poses)
+
+ # Generate random translation
+ dx = np.random.uniform(-5.0, 5.0)
+ dy = np.random.uniform(-5.0, 5.0)
+ translation = Vector2D(dx, dy)
+
+ # Translate using array function
+ result_array = translate_se2_array_along_body_frame(poses_array, translation)
+
+ # Translate each pose individually
+ result_individual = np.zeros_like(poses_array)
+ for i in range(num_poses):
+ pose = StateSE2.from_array(poses_array[i])
+ translated = translate_se2_along_body_frame(pose, translation)
+ result_individual[i] = translated.array
+
+ np.testing.assert_array_almost_equal(result_array, result_individual, decimal=self.decimal)
+
+ def test_transform_empty_arrays(self) -> None:
+ """Test that transform functions handle empty arrays correctly"""
+ reference_se2 = StateSE2.from_array(np.array([1.0, 2.0, np.pi / 4], dtype=np.float64))
+ reference_se3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3], dtype=np.float64))
+
+ # Test SE2 empty arrays
+ empty_se2_poses = np.array([], dtype=np.float64).reshape(0, len(StateSE2Index))
+ empty_2d_points = np.array([], dtype=np.float64).reshape(0, len(Point2DIndex))
+
+ result_se2_poses = convert_absolute_to_relative_se2_array(reference_se2, empty_se2_poses)
+ result_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, empty_2d_points)
+
+ self.assertEqual(result_se2_poses.shape, (0, len(StateSE2Index)))
+ self.assertEqual(result_2d_points.shape, (0, len(Point2DIndex)))
+
+ # Test SE3 empty arrays
+ empty_se3_poses = np.array([], dtype=np.float64).reshape(0, len(EulerStateSE3Index))
+ empty_3d_points = np.array([], dtype=np.float64).reshape(0, len(Point3DIndex))
+
+ result_se3_poses = convert_absolute_to_relative_euler_se3_array(reference_se3, empty_se3_poses)
+ result_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, empty_3d_points)
+
+ self.assertEqual(result_se3_poses.shape, (0, len(EulerStateSE3Index)))
+ self.assertEqual(result_3d_points.shape, (0, len(Point3DIndex)))
+
+ def test_transform_identity_operations(self) -> None:
+ """Test that transforms with identity reference frames work correctly"""
+ # Identity SE2 pose
+ identity_se2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ identity_se3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+
+ for _ in range(self.num_consistency_tests):
+ # Test SE2 identity transforms
+ num_poses = np.random.randint(1, 10)
+ se2_poses = self._get_random_se2_array(num_poses)
+ se2_points = se2_poses[:, StateSE2Index.XY]
+
+ relative_se2_poses = convert_absolute_to_relative_se2_array(identity_se2, se2_poses)
+ relative_se2_points = convert_absolute_to_relative_point_2d_array(identity_se2, se2_points)
+
+ np.testing.assert_array_almost_equal(se2_poses, relative_se2_poses, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(se2_points, relative_se2_points, decimal=self.decimal)
+
+ # Test SE3 identity transforms
+ se3_poses = self._get_random_se3_array(num_poses)
+ se3_points = se3_poses[:, EulerStateSE3Index.XYZ]
+
+ relative_se3_poses = convert_absolute_to_relative_euler_se3_array(identity_se3, se3_poses)
+ relative_se3_points = convert_absolute_to_relative_points_3d_array(identity_se3, se3_points)
+
+ np.testing.assert_array_almost_equal(
+ se3_poses[..., EulerStateSE3Index.EULER_ANGLES],
+ relative_se3_poses[..., EulerStateSE3Index.EULER_ANGLES],
+ decimal=self.decimal,
+ )
+ np.testing.assert_array_almost_equal(se3_points, relative_se3_points, decimal=self.decimal)
+
+ def test_transform_large_rotations(self) -> None:
+ """Test transforms with large rotation angles beyond [-π, π]"""
+ for _ in range(self.num_consistency_tests):
+ # Create poses with large rotation angles
+ large_yaw_se2 = np.random.uniform(-4 * np.pi, 4 * np.pi)
+ large_euler_se3 = np.random.uniform(-4 * np.pi, 4 * np.pi, 3)
+
+ reference_se2 = StateSE2.from_array(np.array([0.0, 0.0, large_yaw_se2], dtype=np.float64))
+ reference_se3 = EulerStateSE3.from_array(
+ np.array([0.0, 0.0, 0.0, large_euler_se3[0], large_euler_se3[1], large_euler_se3[2]], dtype=np.float64)
+ )
+
+ # Generate test poses/points
+ test_se2_poses = self._get_random_se2_array(5)
+ test_se3_poses = self._get_random_se3_array(5)
+ test_2d_points = test_se2_poses[:, StateSE2Index.XY]
+ test_3d_points = test_se3_poses[:, EulerStateSE3Index.XYZ]
+
+ # Test round-trip conversions should still work
+ relative_se2 = convert_absolute_to_relative_se2_array(reference_se2, test_se2_poses)
+ recovered_se2 = convert_relative_to_absolute_se2_array(reference_se2, relative_se2)
+
+ relative_se3 = convert_absolute_to_relative_euler_se3_array(reference_se3, test_se3_poses)
+ recovered_se3 = convert_relative_to_absolute_euler_se3_array(reference_se3, relative_se3)
+
+ relative_2d_points = convert_absolute_to_relative_point_2d_array(reference_se2, test_2d_points)
+ recovered_2d_points = convert_relative_to_absolute_point_2d_array(reference_se2, relative_2d_points)
+
+ relative_3d_points = convert_absolute_to_relative_points_3d_array(reference_se3, test_3d_points)
+ recovered_3d_points = convert_relative_to_absolute_points_3d_array(reference_se3, relative_3d_points)
+
+ # Check consistency (allowing for angle wrapping)
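+            # Only position components are compared: with references beyond [-π, π], recovered angles may
+            # differ from the inputs by multiples of 2π.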
+ np.testing.assert_array_almost_equal(
+ test_se2_poses[:, StateSE2Index.XY],
+ recovered_se2[:, StateSE2Index.XY],
+ decimal=self.decimal,
+ )
+ np.testing.assert_array_almost_equal(
+ test_se3_poses[:, EulerStateSE3Index.XYZ],
+ recovered_se3[:, EulerStateSE3Index.XYZ],
+ decimal=self.decimal,
+ )
+ np.testing.assert_array_almost_equal(test_2d_points, recovered_2d_points, decimal=self.decimal)
+ np.testing.assert_array_almost_equal(test_3d_points, recovered_3d_points, decimal=self.decimal)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/d123/geometry/transform/test/test_transform_euler_se3.py
new file mode 100644
index 00000000..b40ab4c7
--- /dev/null
+++ b/d123/geometry/transform/test/test_transform_euler_se3.py
@@ -0,0 +1,335 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.se import EulerStateSE3
+from d123.geometry.transform.transform_euler_se3 import (
+ convert_absolute_to_relative_points_3d_array,
+ convert_absolute_to_relative_euler_se3_array,
+ convert_relative_to_absolute_points_3d_array,
+ convert_relative_to_absolute_euler_se3_array,
+ translate_euler_se3_along_body_frame,
+ translate_euler_se3_along_x,
+ translate_euler_se3_along_y,
+ translate_euler_se3_along_z,
+)
+from d123.geometry.vector import Vector3D
+
+
+class TestTransformEulerSE3(unittest.TestCase):
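+    """Tests for the EulerStateSE3 translation and absolute/relative conversion functions."""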
+
+ def setUp(self):
+ self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+ self.num_consistency_tests = 10 # Number of random test cases for consistency checks
+
+ def test_translate_se3_along_x(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: EulerStateSE3 = translate_euler_se3_along_x(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_x_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame backward direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -0.5
+ result: EulerStateSE3 = translate_euler_se3_along_x(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.5, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_x_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ distance: float = 2.5
+ result: EulerStateSE3 = translate_euler_se3_along_x(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([0.0, 2.5, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 1.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y_with_existing_position(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction with existing position."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 2.5
+ result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 4.5, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction in the negative direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -1.0
+ result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_y_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame lateral direction with roll rotation."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, np.pi / 2, 0.0, 0.0], dtype=np.float64))
+ distance: float = -1.0
+ result: EulerStateSE3 = translate_euler_se3_along_y(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([1.0, 2.0, 2.0, np.pi / 2, 0.0, 0.0], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z_large_distance(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction with a large distance."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 10.0
+ result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 15.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z_negative(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction in the negative direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = -2.0
+ result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_z_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame vertical direction with pitch rotation."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64))
+ distance: float = 2.0
+ result: EulerStateSE3 = translate_euler_se3_along_z(pose, distance)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([3.0, 2.0, 3.0, 0.0, np.pi / 2, 0.0], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_body_frame(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+ result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_body_frame_multiple_axes(self) -> None:
+ """Tests translating a SE3 state along the body frame in multiple axes."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([0.5, -1.0, 2.0], dtype=np.float64))
+ result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.5, 1.0, 5.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_body_frame_zero_translation(self) -> None:
+ """Tests translating a SE3 state along the body frame with zero translation."""
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation)
+ expected: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 2.0, 3.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array)
+
+ def test_translate_se3_along_body_frame_with_rotation(self) -> None:
+ """Tests translating a SE3 state along the body frame forward direction with yaw rotation."""
+ # Rotate 90 degrees around z-axis, then translate 1 unit along body x-axis
+ pose: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64))
+ translation: Vector3D = Vector3D.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+ result: EulerStateSE3 = translate_euler_se3_along_body_frame(pose, translation)
+ # Should move in +Y direction in world frame
+ expected: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([0.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se3_along_body_frame_consistency(self) -> None:
+ """Tests consistency between translate_se3_along_body_frame and axis-specific translation functions."""
+
+ for _ in range(self.num_consistency_tests):
+ # Generate random parameters
+ x_distance: float = np.random.uniform(-10.0, 10.0)
+ y_distance: float = np.random.uniform(-10.0, 10.0)
+ z_distance: float = np.random.uniform(-10.0, 10.0)
+
+ start_x: float = np.random.uniform(-5.0, 5.0)
+ start_y: float = np.random.uniform(-5.0, 5.0)
+ start_z: float = np.random.uniform(-5.0, 5.0)
+
+ start_roll: float = np.random.uniform(-np.pi, np.pi)
+ start_pitch: float = np.random.uniform(-np.pi, np.pi)
+ start_yaw: float = np.random.uniform(-np.pi, np.pi)
+
+ original_pose: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array(
+ [
+ start_x,
+ start_y,
+ start_z,
+ start_roll,
+ start_pitch,
+ start_yaw,
+ ],
+ dtype=np.float64,
+ )
+ )
+
+ # x-axis translation
+ translation_x: Vector3D = Vector3D.from_array(np.array([x_distance, 0.0, 0.0], dtype=np.float64))
+ result_body_frame_x: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_x)
+ result_axis_x: EulerStateSE3 = translate_euler_se3_along_x(original_pose, x_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_x.array, result_axis_x.array, decimal=self.decimal)
+
+ # y-axis translation
+ translation_y: Vector3D = Vector3D.from_array(np.array([0.0, y_distance, 0.0], dtype=np.float64))
+ result_body_frame_y: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_y)
+ result_axis_y: EulerStateSE3 = translate_euler_se3_along_y(original_pose, y_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_y.array, result_axis_y.array, decimal=self.decimal)
+
+ # z-axis translation
+ translation_z: Vector3D = Vector3D.from_array(np.array([0.0, 0.0, z_distance], dtype=np.float64))
+ result_body_frame_z: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_z)
+ result_axis_z: EulerStateSE3 = translate_euler_se3_along_z(original_pose, z_distance)
+ np.testing.assert_array_almost_equal(result_body_frame_z.array, result_axis_z.array, decimal=self.decimal)
+
+ # all axes translation
+ translation_all: Vector3D = Vector3D.from_array(
+ np.array([x_distance, y_distance, z_distance], dtype=np.float64)
+ )
+ result_body_frame_all: EulerStateSE3 = translate_euler_se3_along_body_frame(original_pose, translation_all)
+ intermediate_pose: EulerStateSE3 = translate_euler_se3_along_x(original_pose, x_distance)
+ intermediate_pose = translate_euler_se3_along_y(intermediate_pose, y_distance)
+ result_axis_all: EulerStateSE3 = translate_euler_se3_along_z(intermediate_pose, z_distance)
+ np.testing.assert_array_almost_equal(
+ result_body_frame_all.array, result_axis_all.array, decimal=self.decimal
+ )
+
+ def test_convert_absolute_to_relative_se3_array(self) -> None:
+ """Tests converting absolute SE3 poses to relative SE3 poses."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array(
+ [
+ [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array(
+ [
+ [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
+ [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_se3_array_single_pose(self) -> None:
+ """Tests converting a single absolute SE3 pose to a relative SE3 pose."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_se3_array_with_rotation(self) -> None:
+ """Tests converting absolute SE3 poses to relative SE3 poses with 90 degree yaw rotation."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_euler_se3_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0, 0.0, 0.0, -np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_se3_array(self) -> None:
+ """Tests converting relative SE3 poses to absolute SE3 poses."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array(
+ [
+ [1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
+ [-1.0, 0.0, -1.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_euler_se3_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array(
+ [
+ [2.0, 2.0, 2.0, 0.0, 0.0, 0.0],
+ [0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ ],
+ dtype=np.float64,
+ )
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_se3_array_with_rotation(self) -> None:
+ """Tests converting relative SE3 poses to absolute SE3 poses with 90 degree yaw rotation."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_euler_se3_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0, 0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_points_3d_array(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_points_3d_array_origin_reference(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points with origin reference."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_absolute_to_relative_points_3d_array_with_rotation(self) -> None:
+ """Tests converting absolute 3D points to relative 3D points with 90 degree yaw rotation."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([0.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ absolute_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_points_3d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_points_3d_array(self) -> None:
+ """Tests converting relative 3D points to absolute 3D points."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 1.0], [-1.0, 0.0, -1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 2.0], [0.0, 1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_points_3d_array_empty(self) -> None:
+ """Tests converting an empty array of relative 3D points to absolute 3D points."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 3)
+ np.testing.assert_array_almost_equal(result, expected)
+
+ def test_convert_relative_to_absolute_points_3d_array_with_rotation(self) -> None:
+ """Tests converting relative 3D points to absolute 3D points with 90 degree yaw rotation."""
+ reference: EulerStateSE3 = EulerStateSE3.from_array(
+ np.array([1.0, 0.0, 0.0, 0.0, 0.0, np.pi / 2], dtype=np.float64)
+ )
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_points_3d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
diff --git a/d123/geometry/transform/test/test_transform_se2.py b/d123/geometry/transform/test/test_transform_se2.py
new file mode 100644
index 00000000..503d87de
--- /dev/null
+++ b/d123/geometry/transform/test/test_transform_se2.py
@@ -0,0 +1,220 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry.se import StateSE2
+from d123.geometry.transform.transform_se2 import (
+ convert_absolute_to_relative_point_2d_array,
+ convert_absolute_to_relative_se2_array,
+ convert_relative_to_absolute_point_2d_array,
+ convert_relative_to_absolute_se2_array,
+ translate_se2_along_body_frame,
+ translate_se2_along_x,
+ translate_se2_along_y,
+ translate_se2_array_along_body_frame,
+)
+from d123.geometry.vector import Vector2D
+
+
+class TestTransformSE2(unittest.TestCase):
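+    """Tests for the StateSE2 translation and absolute/relative conversion functions."""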
+
+ def setUp(self):
+ self.decimal = 6 # Decimal places for np.testing.assert_array_almost_equal
+
+ def test_translate_se2_along_x(self) -> None:
+ """Tests translating a SE2 state along the X-axis."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_x_negative(self) -> None:
+ """Tests translating a SE2 state along the X-axis in the negative direction."""
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+ distance: float = -0.5
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.5, 2.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_x_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the X-axis with 90 degree rotation."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_x(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_y(self) -> None:
+ """Tests translating a SE2 state along the Y-axis."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ distance: float = 1.0
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_y_negative(self) -> None:
+ """Tests translating a SE2 state along the Y-axis in the negative direction."""
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 2.0, 0.0], dtype=np.float64))
+ distance: float = -1.5
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([1.0, 0.5, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_y_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the Y-axis with -90 degree rotation."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, -np.pi / 2], dtype=np.float64))
+ distance: float = 2.0
+ result: StateSE2 = translate_se2_along_y(pose, distance)
+ expected: StateSE2 = StateSE2.from_array(np.array([2.0, 0.0, -np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_forward(self) -> None:
+ """Tests translating a SE2 state along the body frame forward direction, with 90 degree rotation."""
+ # Move 1 unit forward in the direction of yaw (pi/2 = 90 degrees = +Y direction)
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ vector: Vector2D = Vector2D(1.0, 0.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_backward(self) -> None:
+ """Tests translating a SE2 state along the body frame backward direction."""
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ vector: Vector2D = Vector2D(-1.0, 0.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_diagonal(self) -> None:
+ """Tests translating a SE2 state along the body frame diagonal direction."""
+ pose: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.deg2rad(45)], dtype=np.float64))
+ vector: Vector2D = Vector2D(1.0, 0.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(
+ np.array([1.0 + np.sqrt(2.0) / 2, 0.0 + np.sqrt(2.0) / 2, np.deg2rad(45)], dtype=np.float64)
+ )
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_lateral(self) -> None:
+ """Tests translating a SE2 state along the body frame lateral direction."""
+        # Move 1 unit along the body-frame +y axis
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ vector: Vector2D = Vector2D(0.0, 1.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([0.0, 1.0, 0.0], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_along_body_frame_lateral_with_rotation(self) -> None:
+ """Tests translating a SE2 state along the body frame lateral direction with 90 degree rotation."""
+        # Move 1 unit along body +y while facing 90 degrees; in the world frame this is the -x direction
+ pose: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ vector: Vector2D = Vector2D(0.0, 1.0)
+ result: StateSE2 = translate_se2_along_body_frame(pose, vector)
+ expected: StateSE2 = StateSE2.from_array(np.array([-1.0, 0.0, np.pi / 2], dtype=np.float64))
+ np.testing.assert_array_almost_equal(result.array, expected.array, decimal=self.decimal)
+
+ def test_translate_se2_array_along_body_frame_single_distance(self) -> None:
+ """Tests translating a SE2 state array along the body frame forward direction."""
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ distance: Vector2D = Vector2D(1.0, 0.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+    def test_translate_se2_array_along_body_frame_opposite_headings(self) -> None:
+        """Tests translating a SE2 state array along the body frame forward direction for poses with opposite headings."""
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi]], dtype=np.float64)
+ distance: Vector2D = Vector2D(2.0, 0.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 0.0, 0.0], [-2.0, 0.0, np.pi]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_translate_se2_array_along_body_frame_lateral(self) -> None:
+ """Tests translating a SE2 state array along the body frame lateral direction with 90 degree rotation."""
+ poses: npt.NDArray[np.float64] = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, np.pi / 2]], dtype=np.float64)
+ distance: Vector2D = Vector2D(0.0, 1.0)
+ result: npt.NDArray[np.float64] = translate_se2_array_along_body_frame(poses, distance)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_se2_array(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses."""
+ origin: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(origin, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_se2_array_with_rotation(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses with 90 degree rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[0.0, -1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_se2_array_identity(self) -> None:
+ """Tests converting absolute SE2 poses to relative SE2 poses with identity transformation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, 0.0], dtype=np.float64))
+ absolute_poses: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_se2_array(reference, absolute_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 2.0, np.pi / 4]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_se2_array(self) -> None:
+ """Tests converting relative SE2 poses to absolute SE2 poses."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 1.0, 0.0], [-1.0, 0.0, np.pi / 2]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0, 0.0], [0.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_se2_array_with_rotation(self) -> None:
+ """Tests converting relative SE2 poses to absolute SE2 poses with rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_poses: npt.NDArray[np.float64] = np.array([[1.0, 0.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_se2_array(reference, relative_poses)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0, np.pi / 2]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_point_2d_array(self) -> None:
+ """Tests converting absolute 2D points to relative 2D points."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_point_2d_array_with_rotation(self) -> None:
+ """Tests converting absolute 2D points to relative 2D points with 90 degree rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([0.0, 0.0, np.pi / 2], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, -1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_absolute_to_relative_point_2d_array_empty(self) -> None:
+ """Tests converting an empty array of absolute 2D points to relative 2D points."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ absolute_points: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+ result: npt.NDArray[np.float64] = convert_absolute_to_relative_point_2d_array(reference, absolute_points)
+ expected: npt.NDArray[np.float64] = np.array([], dtype=np.float64).reshape(0, 2)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_point_2d_array(self) -> None:
+ """Tests converting relative 2D points to absolute 2D points."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 1.0, 0.0], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [-1.0, 0.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[2.0, 2.0], [0.0, 1.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
+
+ def test_convert_relative_to_absolute_point_2d_array_with_rotation(self) -> None:
+ """Tests converting relative 2D points to absolute 2D points with 90 degree rotation."""
+ reference: StateSE2 = StateSE2.from_array(np.array([1.0, 0.0, np.pi / 2], dtype=np.float64))
+ relative_points: npt.NDArray[np.float64] = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float64)
+ result: npt.NDArray[np.float64] = convert_relative_to_absolute_point_2d_array(reference, relative_points)
+ expected: npt.NDArray[np.float64] = np.array([[1.0, 1.0], [0.0, 0.0]], dtype=np.float64)
+ np.testing.assert_array_almost_equal(result, expected, decimal=self.decimal)
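For reference, here is a minimal NumPy sketch of the SE2 frame conversion these tests exercise. The helper `_relative_se2` and its body are illustrative, not the library implementation; poses follow the (x, y, yaw) row layout used in the test arrays, and angle normalization is omitted for brevity.

import numpy as np

def _relative_se2(origin, absolute):
    # Remove the origin translation, then rotate by -origin_yaw.
    ox, oy, oyaw = origin
    c, s = np.cos(oyaw), np.sin(oyaw)
    rel = absolute.copy()
    dx, dy = absolute[:, 0] - ox, absolute[:, 1] - oy
    rel[:, 0] = c * dx + s * dy
    rel[:, 1] = -s * dx + c * dy
    rel[:, 2] = absolute[:, 2] - oyaw  # headings subtract directly
    return rel

# Matches test_convert_absolute_to_relative_se2_array_with_rotation:
# origin (0, 0, pi/2) and absolute pose (1, 0, pi/2) give (0, -1, 0).
print(_relative_se2(np.array([0.0, 0.0, np.pi / 2]),
                    np.array([[1.0, 0.0, np.pi / 2]])))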
diff --git a/d123/geometry/test/test_transform_quaternion.py b/d123/geometry/transform/test/test_transform_se3.py
similarity index 51%
rename from d123/geometry/test/test_transform_quaternion.py
rename to d123/geometry/transform/test/test_transform_se3.py
index 12c8433e..d7c077ea 100644
--- a/d123/geometry/test/test_transform_quaternion.py
+++ b/d123/geometry/transform/test/test_transform_se3.py
@@ -3,40 +3,31 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.geometry_index import (
- # EulerAnglesIndex,
- # Point2DIndex,
- # Point3DIndex,
- QuaternionSE3Index,
- # StateSE2Index,
- StateSE3Index,
-)
+from d123.geometry.geometry_index import StateSE3Index, EulerStateSE3Index
from d123.geometry.point import Point3D
-from d123.geometry.rotation import EulerAngles, Quaternion
-from d123.geometry.se import StateSE3, QuaternionSE3
-from d123.geometry.transform.transform_quaternion_se3 import (
+from d123.geometry.rotation import Quaternion
+from d123.geometry.se import EulerStateSE3, StateSE3
+from d123.geometry.transform.transform_se3 import (
convert_absolute_to_relative_points_3d_array,
convert_absolute_to_relative_se3_array,
convert_relative_to_absolute_points_3d_array,
convert_relative_to_absolute_se3_array,
- # translate_se3_along_body_frame,
- # translate_se3_along_x,
- # translate_se3_along_y,
- # translate_se3_along_z,
+ translate_se3_along_x,
+ translate_se3_along_y,
+ translate_se3_along_z,
+ translate_se3_along_body_frame,
)
-import d123.geometry.transform.transform_se3 as euler_transform_se3
+import d123.geometry.transform.transform_euler_se3 as euler_transform_se3
from d123.geometry.utils.rotation_utils import (
get_rotation_matrices_from_euler_array,
get_rotation_matrices_from_quaternion_array,
)
-# from d123.geometry.vector import Vector2D, Vector3D
-
-class TestTransformQuaternion(unittest.TestCase):
+class TestTransformSE3(unittest.TestCase):
def setUp(self):
- euler_se3_a = StateSE3(
+ euler_se3_a = EulerStateSE3(
x=1.0,
y=2.0,
z=3.0,
@@ -44,7 +35,7 @@ def setUp(self):
pitch=0.0,
yaw=0.0,
)
- euler_se3_b = StateSE3(
+ euler_se3_b = EulerStateSE3(
x=1.0,
y=-2.0,
z=3.0,
@@ -52,7 +43,7 @@ def setUp(self):
pitch=np.deg2rad(90),
yaw=0.0,
)
- euler_se3_c = StateSE3(
+ euler_se3_c = EulerStateSE3(
x=-1.0,
y=2.0,
z=-3.0,
@@ -61,9 +52,9 @@ def setUp(self):
yaw=np.deg2rad(90),
)
- quat_se3_a = euler_se3_a.quaternion_se3
- quat_se3_b = euler_se3_b.quaternion_se3
- quat_se3_c = euler_se3_c.quaternion_se3
+ quat_se3_a: StateSE3 = euler_se3_a.quaternion_se3
+ quat_se3_b: StateSE3 = euler_se3_b.quaternion_se3
+ quat_se3_c: StateSE3 = euler_se3_c.quaternion_se3
self.euler_se3 = [euler_se3_a, euler_se3_b, euler_se3_c]
self.quat_se3 = [quat_se3_a, quat_se3_b, quat_se3_c]
@@ -72,11 +63,13 @@ def setUp(self):
def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]:
"""Generate a random SE3 poses"""
- random_se3_array = np.zeros((size, len(StateSE3Index)), dtype=np.float64)
- random_se3_array[:, StateSE3Index.XYZ] = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz, (size, 3))
- random_se3_array[:, StateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
- random_se3_array[:, StateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
- random_se3_array[:, StateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+ random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64)
+ random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform(
+ -self.max_pose_xyz, self.max_pose_xyz, (size, 3)
+ )
+ random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+ random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+ random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
return random_se3_array
@@ -84,12 +77,12 @@ def _convert_euler_se3_array_to_quat_se3_array(
self, euler_se3_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
"""Convert an array of SE3 poses from Euler angles to Quaternion representation"""
- quat_se3_array = np.zeros((euler_se3_array.shape[0], len(QuaternionSE3Index)), dtype=np.float64)
- quat_se3_array[:, QuaternionSE3Index.XYZ] = euler_se3_array[:, StateSE3Index.XYZ]
- rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, StateSE3Index.EULER_ANGLES])
+ quat_se3_array = np.zeros((euler_se3_array.shape[0], len(StateSE3Index)), dtype=np.float64)
+ quat_se3_array[:, StateSE3Index.XYZ] = euler_se3_array[:, EulerStateSE3Index.XYZ]
+ rotation_matrices = get_rotation_matrices_from_euler_array(euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES])
for idx, rotation_matrix in enumerate(rotation_matrices):
quat = Quaternion.from_rotation_matrix(rotation_matrix)
- quat_se3_array[idx, QuaternionSE3Index.QUATERNION] = quat.array
+ quat_se3_array[idx, StateSE3Index.QUATERNION] = quat.array
return quat_se3_array
def test_sanity(self):
@@ -112,15 +105,15 @@ def test_random_sanity(self):
random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
np.testing.assert_allclose(
- random_euler_se3_array[:, StateSE3Index.XYZ],
- random_quat_se3_array[:, QuaternionSE3Index.XYZ],
+ random_euler_se3_array[:, EulerStateSE3Index.XYZ],
+ random_quat_se3_array[:, StateSE3Index.XYZ],
atol=1e-6,
)
quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
- random_quat_se3_array[:, QuaternionSE3Index.QUATERNION]
+ random_quat_se3_array[:, StateSE3Index.QUATERNION]
)
euler_rotation_matrices = get_rotation_matrices_from_euler_array(
- random_euler_se3_array[:, StateSE3Index.EULER_ANGLES]
+ random_euler_se3_array[:, EulerStateSE3Index.EULER_ANGLES]
)
np.testing.assert_allclose(euler_rotation_matrices, quat_rotation_matrices, atol=1e-6)
@@ -141,18 +134,18 @@ def test_convert_absolute_to_relative_se3_array(self):
random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
rel_se3_quat = convert_absolute_to_relative_se3_array(quat_se3, random_quat_se3_array)
- rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_se3_array(
+ rel_se3_euler = euler_transform_se3.convert_absolute_to_relative_euler_se3_array(
euler_se3, random_euler_se3_array
)
np.testing.assert_allclose(
- rel_se3_euler[..., StateSE3Index.XYZ], rel_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6
+ rel_se3_euler[..., EulerStateSE3Index.XYZ], rel_se3_quat[..., StateSE3Index.XYZ], atol=1e-6
)
# We compare rotation matrices to avoid issues with quaternion sign ambiguity
quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
- rel_se3_quat[..., QuaternionSE3Index.QUATERNION]
+ rel_se3_quat[..., StateSE3Index.QUATERNION]
)
euler_rotation_matrices = get_rotation_matrices_from_euler_array(
- rel_se3_euler[..., StateSE3Index.EULER_ANGLES]
+ rel_se3_euler[..., EulerStateSE3Index.EULER_ANGLES]
)
np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6)
@@ -173,21 +166,65 @@ def test_convert_relative_to_absolute_se3_array(self):
random_quat_se3_array = self._convert_euler_se3_array_to_quat_se3_array(random_euler_se3_array)
abs_se3_quat = convert_relative_to_absolute_se3_array(quat_se3, random_quat_se3_array)
- abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_se3_array(
+ abs_se3_euler = euler_transform_se3.convert_relative_to_absolute_euler_se3_array(
euler_se3, random_euler_se3_array
)
np.testing.assert_allclose(
- abs_se3_euler[..., StateSE3Index.XYZ], abs_se3_quat[..., QuaternionSE3Index.XYZ], atol=1e-6
+ abs_se3_euler[..., EulerStateSE3Index.XYZ], abs_se3_quat[..., StateSE3Index.XYZ], atol=1e-6
)
# We compare rotation matrices to avoid issues with quaternion sign ambiguity
quat_rotation_matrices = get_rotation_matrices_from_quaternion_array(
- abs_se3_quat[..., QuaternionSE3Index.QUATERNION]
+ abs_se3_quat[..., StateSE3Index.QUATERNION]
)
euler_rotation_matrices = get_rotation_matrices_from_euler_array(
- abs_se3_euler[..., StateSE3Index.EULER_ANGLES]
+ abs_se3_euler[..., EulerStateSE3Index.EULER_ANGLES]
)
np.testing.assert_allclose(quat_rotation_matrices, euler_rotation_matrices, atol=1e-6)
+ def test_translate_se3_along_x(self):
+ for _ in range(10):
+ distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz)
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ translated_quat = translate_se3_along_x(quat_se3, distance)
+ translated_euler = euler_transform_se3.translate_euler_se3_along_x(euler_se3, distance)
+ np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6)
+ np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6)
+ np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6)
+
+ def test_translate_se3_along_y(self):
+ for _ in range(10):
+ distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz)
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ translated_quat = translate_se3_along_y(quat_se3, distance)
+ translated_euler = euler_transform_se3.translate_euler_se3_along_y(euler_se3, distance)
+ np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6)
+ np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6)
+ np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6)
+
+ def test_translate_se3_along_z(self):
+ for _ in range(10):
+ distance = np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz)
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ translated_quat = translate_se3_along_z(quat_se3, distance)
+ translated_euler = euler_transform_se3.translate_euler_se3_along_z(euler_se3, distance)
+ np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6)
+ np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6)
+ np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6)
+
+ def test_translate_se3_along_body_frame(self):
+ for _ in range(10):
+ vector_3d = Point3D(
+ x=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz),
+ y=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz),
+ z=np.random.uniform(-self.max_pose_xyz, self.max_pose_xyz),
+ )
+ for quat_se3, euler_se3 in zip(self.quat_se3, self.euler_se3):
+ translated_quat = translate_se3_along_body_frame(quat_se3, vector_3d)
+ translated_euler = euler_transform_se3.translate_euler_se3_along_body_frame(euler_se3, vector_3d)
+ np.testing.assert_allclose(translated_quat.point_3d.array, translated_euler.point_3d.array, atol=1e-6)
+ np.testing.assert_allclose(translated_quat.rotation_matrix, translated_euler.rotation_matrix, atol=1e-6)
+ np.testing.assert_allclose(quat_se3.quaternion.array, translated_quat.quaternion.array, atol=1e-6)
+
if __name__ == "__main__":
unittest.main()
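The rotation-matrix comparisons in the tests above sidestep quaternion sign ambiguity: q and -q encode the same rotation, so an elementwise quaternion comparison can fail even when the transforms agree. A standalone illustration, using scipy (assumed available here) rather than the repository's own Quaternion type:

import numpy as np
from scipy.spatial.transform import Rotation

q = Rotation.from_euler("xyz", [0.3, 0.2, 0.1]).as_quat()  # scipy order: (x, y, z, w)
R_pos = Rotation.from_quat(q).as_matrix()
R_neg = Rotation.from_quat(-q).as_matrix()  # negated quaternion, same rotation
np.testing.assert_allclose(R_pos, R_neg, atol=1e-12)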
diff --git a/d123/geometry/transform/transform_euler_se3.py b/d123/geometry/transform/transform_euler_se3.py
new file mode 100644
index 00000000..c2f897d5
--- /dev/null
+++ b/d123/geometry/transform/transform_euler_se3.py
@@ -0,0 +1,169 @@
+from typing import Union
+
+import numpy as np
+import numpy.typing as npt
+
+from d123.geometry import EulerStateSE3, EulerStateSE3Index, Vector3D
+from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
+from d123.geometry.rotation import EulerAngles
+from d123.geometry.utils.rotation_utils import (
+ get_rotation_matrices_from_euler_array,
+ get_rotation_matrix_from_euler_array,
+ normalize_angle,
+)
+
+
+def translate_euler_se3_along_z(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 along its body-frame Z-axis."""
+ R = state_se3.rotation_matrix
+ z_axis = R[:, 2]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[EulerStateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+ return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_y(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 along its body-frame Y-axis."""
+ R = state_se3.rotation_matrix
+ y_axis = R[:, 1]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[EulerStateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+ return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_x(state_se3: EulerStateSE3, distance: float) -> EulerStateSE3:
+    """Translates an EulerStateSE3 along its body-frame X-axis."""
+ R = state_se3.rotation_matrix
+ x_axis = R[:, 0]
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[EulerStateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+ return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_euler_se3_along_body_frame(state_se3: EulerStateSE3, vector_3d: Vector3D) -> EulerStateSE3:
+    """Translates an EulerStateSE3 along a vector given in the body frame."""
+ R = state_se3.rotation_matrix
+ world_translation = R @ vector_3d.array
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[EulerStateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ]
+ return EulerStateSE3.from_array(state_se3_array, copy=False)
+
+
+def convert_absolute_to_relative_euler_se3_array(
+ origin: Union[EulerStateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an array of Euler-SE3 poses from the absolute frame to the origin frame."""
+ if isinstance(origin, EulerStateSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+ origin_array = origin
+ t_origin = origin_array[EulerStateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin_array[EulerStateSE3Index.EULER_ANGLES])
+ else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.ndim >= 1
+ assert se3_array.shape[-1] == len(EulerStateSE3Index)
+
+ # Prepare output array
+ rel_se3_array = se3_array.copy()
+
+ # Vectorized relative position calculation
+ abs_positions = se3_array[..., EulerStateSE3Index.XYZ]
+ rel_positions = (abs_positions - t_origin) @ R_origin
+ rel_se3_array[..., EulerStateSE3Index.XYZ] = rel_positions
+
+ # Convert absolute rotation matrices to relative rotation matrices
+ abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES])
+ rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices)
+ if se3_array.shape[0] != 0:
+ rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices])
+ rel_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+ return rel_se3_array
+
+
+def convert_relative_to_absolute_euler_se3_array(
+    origin: Union[EulerStateSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts an array of Euler-SE3 poses from the origin frame back to the absolute frame."""
+ if isinstance(origin, EulerStateSE3):
+ origin_array = origin.array
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+ origin_array = origin
+ t_origin = origin_array[EulerStateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin_array[EulerStateSE3Index.EULER_ANGLES])
+ else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+ assert se3_array.ndim >= 1
+ assert se3_array.shape[-1] == len(EulerStateSE3Index)
+
+ # Prepare output array
+ abs_se3_array = se3_array.copy()
+
+ # Vectorized absolute position calculation: rotate and translate
+ rel_positions = se3_array[..., EulerStateSE3Index.XYZ]
+ abs_positions = (rel_positions @ R_origin.T) + t_origin
+ abs_se3_array[..., EulerStateSE3Index.XYZ] = abs_positions
+
+ # Convert relative rotation matrices to absolute rotation matrices
+ rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., EulerStateSE3Index.EULER_ANGLES])
+ abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices)
+
+ if se3_array.shape[0] != 0:
+ abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices])
+ abs_se3_array[..., EulerStateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+
+ return abs_se3_array
+
+
+def convert_absolute_to_relative_points_3d_array(
+ origin: Union[EulerStateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the absolute frame to the frame of the Euler-SE3 origin."""
+ if isinstance(origin, EulerStateSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+ t_origin = origin[EulerStateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_euler_array(origin[EulerStateSE3Index.EULER_ANGLES])
+ else:
+        raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.ndim >= 1
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ # Translate points to origin frame, then rotate to body frame
+ relative_points = (points_3d_array - t_origin) @ R_origin
+ return relative_points
+
+
+def convert_relative_to_absolute_points_3d_array(
+ origin: Union[EulerStateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+    """Converts 3D points from the Euler-SE3 origin frame back to the absolute frame."""
+ if isinstance(origin, EulerStateSE3):
+ origin_array = origin.array
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(EulerStateSE3Index)
+ origin_array = origin
+ else:
+ raise TypeError(f"Expected EulerStateSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ R = EulerAngles.from_array(origin_array[EulerStateSE3Index.EULER_ANGLES]).rotation_matrix
+    absolute_points = points_3d_array @ R.T + origin_array[EulerStateSE3Index.XYZ]
+ return absolute_points
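As a hedged usage sketch for the module above (assuming the (x, y, z, roll, pitch, yaw) array layout of EulerStateSE3Index and the constructor keywords shown in the tests), converting poses into an origin's frame and back should round-trip:

import numpy as np
from d123.geometry import EulerStateSE3
from d123.geometry.transform.transform_euler_se3 import (
    convert_absolute_to_relative_euler_se3_array,
    convert_relative_to_absolute_euler_se3_array,
)

origin = EulerStateSE3(x=1.0, y=2.0, z=3.0, roll=0.0, pitch=0.0, yaw=np.pi / 4)
poses = np.array([[2.0, 2.0, 3.0, 0.0, 0.0, np.pi / 4]])

relative = convert_absolute_to_relative_euler_se3_array(origin, poses)
restored = convert_relative_to_absolute_euler_se3_array(origin, relative)
np.testing.assert_allclose(restored, poses, atol=1e-9)  # round trip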
diff --git a/d123/geometry/transform/transform_quaternion_se3.py b/d123/geometry/transform/transform_quaternion_se3.py
deleted file mode 100644
index 32a6858b..00000000
--- a/d123/geometry/transform/transform_quaternion_se3.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from typing import Union
-
-import numpy as np
-import numpy.typing as npt
-
-from d123.geometry import Vector3D
-from d123.geometry.geometry_index import Point3DIndex, QuaternionSE3Index, Vector3DIndex
-from d123.geometry.se import QuaternionSE3
-from d123.geometry.utils.rotation_utils import (
- conjugate_quaternion_array,
- get_rotation_matrix_from_quaternion_array,
- multiply_quaternion_arrays,
-)
-
-
-def convert_absolute_to_relative_points_3d_array(
- origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts 3D points from the absolute frame to the relative frame.
-
- :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
- :param points_3d_array: The 3D points in the absolute frame.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
- """
-
- if isinstance(origin, QuaternionSE3):
- t_origin = origin.point_3d.array
- R_origin = origin.rotation_matrix
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
- t_origin = origin[QuaternionSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
- else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
-
- assert points_3d_array.ndim >= 1
- assert points_3d_array.shape[-1] == len(Point3DIndex)
-
- # Translate points to origin frame, then rotate to body frame
- relative_points = (points_3d_array - t_origin) @ R_origin
- return relative_points
-
-
-def convert_absolute_to_relative_se3_array(
- origin: Union[QuaternionSE3, npt.NDArray[np.float64]], se3_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts an SE3 array from the absolute frame to the relative frame.
-
- :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
- :param se3_array: The SE3 array in the absolute frame.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The SE3 array in the relative frame, indexed by :class:`~d123.geometry.StateSE3Index`.
- """
- if isinstance(origin, QuaternionSE3):
- origin_array = origin.array
- t_origin = origin.point_3d.array
- R_origin = origin.rotation_matrix
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
- origin_array = origin
- t_origin = origin_array[QuaternionSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
- else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
-
- assert se3_array.ndim >= 1
- assert se3_array.shape[-1] == len(QuaternionSE3Index)
-
- abs_positions = se3_array[..., QuaternionSE3Index.XYZ]
- abs_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
-
- rel_se3_array = np.zeros_like(se3_array)
-
- # 1. Vectorized relative position calculation: translate and rotate
- rel_positions = (abs_positions - t_origin) @ R_origin
- rel_se3_array[..., QuaternionSE3Index.XYZ] = rel_positions
-
- # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate
- q_origin_conj = conjugate_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
- rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions)
-
- rel_se3_array[..., QuaternionSE3Index.QUATERNION] = rel_quaternions
-
- return rel_se3_array
-
-
-def convert_relative_to_absolute_points_3d_array(
- origin: Union[QuaternionSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts 3D points from the relative frame to the absolute frame.
-
- :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
- :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
- """
- if isinstance(origin, QuaternionSE3):
- t_origin = origin.point_3d.array
- R_origin = origin.rotation_matrix
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
- t_origin = origin[QuaternionSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_quaternion_array(origin[QuaternionSE3Index.QUATERNION])
- else:
- raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
- assert points_3d_array.shape[-1] == len(Point3DIndex)
-
- absolute_points = points_3d_array @ R_origin.T + t_origin
- return absolute_points
-
-
-def convert_relative_to_absolute_se3_array(
- origin: QuaternionSE3, se3_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts an SE3 array from the relative frame to the absolute frame.
-
- :param origin: The origin state in the relative frame, as a StateSE3 or np.ndarray.
- :param se3_array: The SE3 array in the relative frame.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The SE3 array in the absolute frame, indexed by :class:`~d123.geometry.StateSE3Index`.
- """
-
- if isinstance(origin, QuaternionSE3):
- origin_array = origin.array
- t_origin = origin.point_3d.array
- R_origin = origin.rotation_matrix
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(QuaternionSE3Index)
- origin_array = origin
- t_origin = origin_array[QuaternionSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_quaternion_array(origin_array[QuaternionSE3Index.QUATERNION])
- else:
- raise TypeError(f"Expected QuaternionSE3 or np.ndarray, got {type(origin)}")
-
- assert se3_array.ndim >= 1
- assert se3_array.shape[-1] == len(QuaternionSE3Index)
-
- # Extract relative positions and orientations
- rel_positions = se3_array[..., QuaternionSE3Index.XYZ]
- rel_quaternions = se3_array[..., QuaternionSE3Index.QUATERNION]
-
- # Vectorized absolute position calculation: rotate and translate
- abs_positions = (R_origin @ rel_positions.T).T + t_origin
- abs_quaternions = multiply_quaternion_arrays(origin_array[QuaternionSE3Index.QUATERNION], rel_quaternions)
-
- # Prepare output array
- abs_se3_array = se3_array.copy()
- abs_se3_array[..., QuaternionSE3Index.XYZ] = abs_positions
- abs_se3_array[..., QuaternionSE3Index.QUATERNION] = abs_quaternions
-
- return abs_se3_array
-
-
-def translate_se3_along_z(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
- """Translates a QuaternionSE3 state along the Z-axis.
-
- :param state_se3: The QuaternionSE3 state to translate.
- :param distance: The distance to translate along the Z-axis.
- :return: The translated QuaternionSE3 state.
- """
- R = state_se3.rotation_matrix
- z_axis = R[:, 2]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[QuaternionSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
- return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_y(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
- """Translates a QuaternionSE3 state along the Y-axis.
-
- :param state_se3: The QuaternionSE3 state to translate.
- :param distance: The distance to translate along the Y-axis.
- :return: The translated QuaternionSE3 state.
- """
- R = state_se3.rotation_matrix
- y_axis = R[:, 1]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[QuaternionSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
- return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_x(state_se3: QuaternionSE3, distance: float) -> QuaternionSE3:
- """Translates a QuaternionSE3 state along the X-axis.
-
- :param state_se3: The QuaternionSE3 state to translate.
- :param distance: The distance to translate along the X-axis.
- :return: The translated QuaternionSE3 state.
- """
- R = state_se3.rotation_matrix
- x_axis = R[:, 0]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[QuaternionSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
- return QuaternionSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_body_frame(state_se3: QuaternionSE3, vector_3d: Vector3D) -> QuaternionSE3:
- """Translates a QuaternionSE3 state along a vector in the body frame.
-
- :param state_se3: The QuaternionSE3 state to translate.
- :param vector_3d: The vector to translate along in the body frame.
- :return: The translated QuaternionSE3 state.
- """
- R = state_se3.rotation_matrix
- world_translation = R @ vector_3d.array
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[QuaternionSE3Index.XYZ] += world_translation
- return QuaternionSE3.from_array(state_se3_array, copy=False)
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index affb70a3..4725e4c2 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -3,78 +3,43 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry import StateSE3, StateSE3Index, Vector3D
-from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
-from d123.geometry.rotation import EulerAngles
+from d123.geometry import Vector3D
+from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, Vector3DIndex
+from d123.geometry.se import StateSE3
from d123.geometry.utils.rotation_utils import (
- get_rotation_matrices_from_euler_array,
- get_rotation_matrix_from_euler_array,
- normalize_angle,
+ conjugate_quaternion_array,
+ get_rotation_matrix_from_quaternion_array,
+ multiply_quaternion_arrays,
)
-def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
- """Translates a SE3 state along the Z-axis.
-
- :param state_se3: The SE3 state to translate.
- :param distance: The distance to translate along the Z-axis.
- :return: The translated SE3 state.
- """
-
- R = state_se3.rotation_matrix
- z_axis = R[:, 2]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
- return StateSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
- """Translates a SE3 state along the Y-axis.
-
- :param state_se3: The SE3 state to translate.
- :param distance: The distance to translate along the Y-axis.
- :return: The translated SE3 state.
- """
-
- R = state_se3.rotation_matrix
- y_axis = R[:, 1]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
- return StateSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
- """Translates a SE3 state along the X-axis.
+def convert_absolute_to_relative_points_3d_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the absolute frame to the relative frame.
- :param state_se3: The SE3 state to translate.
- :param distance: The distance to translate along the X-axis.
- :return: The translated SE3 state.
+ :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the absolute frame.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+ :return: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
"""
- R = state_se3.rotation_matrix
- x_axis = R[:, 0]
-
- state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
- return StateSE3.from_array(state_se3_array, copy=False)
-
-
-def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
- """Translates a SE3 state along a vector in the body frame.
-
- :param state_se3: The SE3 state to translate.
- :param vector_3d: The vector to translate along in the body frame.
- :return: The translated SE3 state.
- """
+ if isinstance(origin, StateSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ t_origin = origin[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION])
+ else:
+ raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
- R = state_se3.rotation_matrix
- world_translation = R @ vector_3d.array
+ assert points_3d_array.ndim >= 1
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
- state_se3_array = state_se3.array.copy()
- state_se3_array[StateSE3Index.XYZ] += world_translation[Vector3DIndex.XYZ]
- return StateSE3.from_array(state_se3_array, copy=False)
+ # Translate points to origin frame, then rotate to body frame
+ relative_points = (points_3d_array - t_origin) @ R_origin
+ return relative_points
def convert_absolute_to_relative_se3_array(
@@ -95,7 +60,7 @@ def convert_absolute_to_relative_se3_array(
assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
origin_array = origin
t_origin = origin_array[StateSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+ R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION])
else:
raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
@@ -103,23 +68,49 @@ def convert_absolute_to_relative_se3_array(
assert se3_array.shape[-1] == len(StateSE3Index)
abs_positions = se3_array[..., StateSE3Index.XYZ]
- abs_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES])
+ abs_quaternions = se3_array[..., StateSE3Index.QUATERNION]
- # Convert absolute rotation matrices to relative rotation matrices
- rel_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin.T, abs_rotation_matrices)
- rel_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in rel_rotation_matrices])
+ rel_se3_array = np.zeros_like(se3_array)
- # Vectorized relative position calculation
+ # 1. Vectorized relative position calculation: translate and rotate
rel_positions = (abs_positions - t_origin) @ R_origin
-
- # Prepare output array
- rel_se3_array = se3_array.copy()
rel_se3_array[..., StateSE3Index.XYZ] = rel_positions
- rel_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(rel_euler_angles)
+
+ # 2. Vectorized relative orientation calculation: quaternion multiplication with conjugate
+ q_origin_conj = conjugate_quaternion_array(origin_array[StateSE3Index.QUATERNION])
+ rel_quaternions = multiply_quaternion_arrays(q_origin_conj, abs_quaternions)
+
+ rel_se3_array[..., StateSE3Index.QUATERNION] = rel_quaternions
return rel_se3_array
+def convert_relative_to_absolute_points_3d_array(
+ origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
+) -> npt.NDArray[np.float64]:
+ """Converts 3D points from the relative frame to the absolute frame.
+
+ :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
+ :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
+ :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
+ if isinstance(origin, StateSE3):
+ t_origin = origin.point_3d.array
+ R_origin = origin.rotation_matrix
+ elif isinstance(origin, np.ndarray):
+ assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
+ t_origin = origin[StateSE3Index.XYZ]
+ R_origin = get_rotation_matrix_from_quaternion_array(origin[StateSE3Index.QUATERNION])
+ else:
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+
+ assert points_3d_array.shape[-1] == len(Point3DIndex)
+
+ absolute_points = points_3d_array @ R_origin.T + t_origin
+ return absolute_points
+
+
def convert_relative_to_absolute_se3_array(
origin: StateSE3, se3_array: npt.NDArray[np.float64]
) -> npt.NDArray[np.float64]:
@@ -139,81 +130,84 @@ def convert_relative_to_absolute_se3_array(
assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
origin_array = origin
t_origin = origin_array[StateSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_euler_array(origin_array[StateSE3Index.EULER_ANGLES])
+ R_origin = get_rotation_matrix_from_quaternion_array(origin_array[StateSE3Index.QUATERNION])
else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+        raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
assert se3_array.ndim >= 1
assert se3_array.shape[-1] == len(StateSE3Index)
# Extract relative positions and orientations
rel_positions = se3_array[..., StateSE3Index.XYZ]
- rel_rotation_matrices = get_rotation_matrices_from_euler_array(se3_array[..., StateSE3Index.EULER_ANGLES])
+ rel_quaternions = se3_array[..., StateSE3Index.QUATERNION]
# Vectorized absolute position calculation: rotate and translate
- abs_positions = (rel_positions @ R_origin.T) + t_origin
-
- # Convert relative rotation matrices to absolute rotation matrices
- abs_rotation_matrices = np.einsum("ij,...jk->...ik", R_origin, rel_rotation_matrices)
- abs_euler_angles = np.array([EulerAngles.from_rotation_matrix(R).array for R in abs_rotation_matrices])
+ abs_positions = (R_origin @ rel_positions.T).T + t_origin
+ abs_quaternions = multiply_quaternion_arrays(origin_array[StateSE3Index.QUATERNION], rel_quaternions)
# Prepare output array
abs_se3_array = se3_array.copy()
abs_se3_array[..., StateSE3Index.XYZ] = abs_positions
- abs_se3_array[..., StateSE3Index.EULER_ANGLES] = normalize_angle(abs_euler_angles)
+ abs_se3_array[..., StateSE3Index.QUATERNION] = abs_quaternions
return abs_se3_array
-def convert_absolute_to_relative_points_3d_array(
- origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts 3D points from the absolute frame to the relative frame.
+def translate_se3_along_z(state_se3: StateSE3, distance: float) -> StateSE3:
+ """Translates an SE3 state along the Z-axis.
- :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
- :param points_3d_array: The 3D points in the absolute frame.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The 3D points in the relative frame , indexed by :class:`~d123.geometry.Point3DIndex`.
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the Z-axis.
+ :return: The translated SE3 state.
"""
+ R = state_se3.rotation_matrix
+ z_axis = R[:, 2]
- if isinstance(origin, StateSE3):
- t_origin = origin.point_3d.array
- R_origin = origin.rotation_matrix
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
- t_origin = origin[StateSE3Index.XYZ]
- R_origin = get_rotation_matrix_from_euler_array(origin[StateSE3Index.EULER_ANGLES])
- else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.XYZ] += distance * z_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
- assert points_3d_array.ndim >= 1
- assert points_3d_array.shape[-1] == len(Point3DIndex)
- # Translate points to origin frame, then rotate to body frame
- relative_points = (points_3d_array - t_origin) @ R_origin
- return relative_points
+def translate_se3_along_y(state_se3: StateSE3, distance: float) -> StateSE3:
+    """Translates an SE3 state along the Y-axis.
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the Y-axis.
+ :return: The translated SE3 state.
+ """
+ R = state_se3.rotation_matrix
+ y_axis = R[:, 1]
-def convert_relative_to_absolute_points_3d_array(
- origin: Union[StateSE3, npt.NDArray[np.float64]], points_3d_array: npt.NDArray[np.float64]
-) -> npt.NDArray[np.float64]:
- """Converts 3D points from the relative frame to the absolute frame.
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.XYZ] += distance * y_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
- :param origin: The origin state in the absolute frame, as a StateSE3 or np.ndarray.
- :param points_3d_array: The 3D points in the relative frame, indexed by :class:`~d123.geometry.Point3DIndex`.
- :raises TypeError: If the origin is not a StateSE3 or np.ndarray.
- :return: The 3D points in the absolute frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+
+def translate_se3_along_x(state_se3: StateSE3, distance: float) -> StateSE3:
+    """Translates an SE3 state along the X-axis.
+
+ :param state_se3: The SE3 state to translate.
+ :param distance: The distance to translate along the X-axis.
+ :return: The translated SE3 state.
"""
- if isinstance(origin, StateSE3):
- origin_array = origin.array
- elif isinstance(origin, np.ndarray):
- assert origin.ndim == 1 and origin.shape[-1] == len(StateSE3Index)
- origin_array = origin
- else:
- raise TypeError(f"Expected StateSE3 or np.ndarray, got {type(origin)}")
+ R = state_se3.rotation_matrix
+ x_axis = R[:, 0]
- assert points_3d_array.shape[-1] == len(Point3DIndex)
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.XYZ] += distance * x_axis[Vector3DIndex.XYZ]
+ return StateSE3.from_array(state_se3_array, copy=False)
- R = EulerAngles.from_array(origin_array[StateSE3Index.EULER_ANGLES]).rotation_matrix
- absolute_points = points_3d_array @ R.T + origin.point_3d.array
- return absolute_points
+
+def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) -> StateSE3:
+    """Translates an SE3 state along a vector in the body frame.
+
+ :param state_se3: The SE3 state to translate.
+ :param vector_3d: The vector to translate along in the body frame.
+ :return: The translated SE3 state.
+ """
+ R = state_se3.rotation_matrix
+ world_translation = R @ vector_3d.array
+
+ state_se3_array = state_se3.array.copy()
+ state_se3_array[StateSE3Index.XYZ] += world_translation
+ return StateSE3.from_array(state_se3_array, copy=False)
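The quaternion algebra behind convert_absolute_to_relative_se3_array and its inverse reduces to conjugation plus the Hamilton product: q_rel = conj(q_origin) * q_abs, so composing the origin back on recovers q_abs. A self-contained sketch in (qw, qx, qy, qz) ordering, independent of the library helpers:

import numpy as np

def _qmul(a, b):
    # Hamilton product in (w, x, y, z) ordering.
    w1, x1, y1, z1 = a
    w2, x2, y2, z2 = b
    return np.array([
        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
    ])

def _conj(q):
    # For unit quaternions, the conjugate is the inverse.
    return q * np.array([1.0, -1.0, -1.0, -1.0])

q_origin = np.array([np.cos(0.3), np.sin(0.3), 0.0, 0.0])  # roll-only unit quaternion
q_abs = np.array([np.cos(0.5), 0.0, np.sin(0.5), 0.0])
q_rel = _qmul(_conj(q_origin), q_abs)
np.testing.assert_allclose(_qmul(q_origin, q_rel), q_abs, atol=1e-12)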
From 8c4117e52b6d75976702aba1ddf105ae39c18cc2 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 15:24:50 +0200
Subject: [PATCH 039/145] Use quaternions for se3 bounding boxes (#43), add
tests and refactor `geometry.utils` (#44)
---
d123/geometry/__init__.py | 8 +-
d123/geometry/bounding_box.py | 21 +-
d123/geometry/geometry_index.py | 29 +-
d123/geometry/se.py | 8 +
d123/geometry/test/test_bounding_box.py | 26 +-
d123/geometry/transform/transform_se2.py | 28 +-
d123/geometry/transform/transform_se3.py | 24 +-
d123/geometry/utils/bounding_box_utils.py | 185 ++++++------
d123/geometry/utils/rotation_utils.py | 45 ++-
d123/geometry/utils/test/__init__.py | 0
.../utils/test/test_bounding_box_utils.py | 269 ++++++++++++++++++
.../utils/test/test_polyline_utils.py | 0
.../utils/test/test_rotation_utils.py | 0
d123/geometry/utils/utils.py | 63 ----
14 files changed, 503 insertions(+), 203 deletions(-)
create mode 100644 d123/geometry/utils/test/__init__.py
create mode 100644 d123/geometry/utils/test/test_bounding_box_utils.py
create mode 100644 d123/geometry/utils/test/test_polyline_utils.py
create mode 100644 d123/geometry/utils/test/test_rotation_utils.py
delete mode 100644 d123/geometry/utils/utils.py
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index ec44efd5..68721954 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -1,4 +1,3 @@
-from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
from d123.geometry.geometry_index import (
BoundingBoxSE2Index,
BoundingBoxSE3Index,
@@ -11,8 +10,9 @@
Vector2DIndex,
Vector3DIndex,
)
-from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.point import Point2D, Point3D
-from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
-from d123.geometry.se import StateSE2, EulerStateSE3
from d123.geometry.vector import Vector2D, Vector3D
+from d123.geometry.se import StateSE2, StateSE3, EulerStateSE3
+from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.occupancy_map import OccupancyMap2D
diff --git a/d123/geometry/bounding_box.py b/d123/geometry/bounding_box.py
index 6ace64e7..b164fc40 100644
--- a/d123/geometry/bounding_box.py
+++ b/d123/geometry/bounding_box.py
@@ -11,7 +11,7 @@
from d123.common.utils.mixin import ArrayMixin
from d123.geometry.geometry_index import BoundingBoxSE2Index, BoundingBoxSE3Index, Corners2DIndex, Corners3DIndex
from d123.geometry.point import Point2D, Point3D
-from d123.geometry.se import StateSE2, EulerStateSE3
+from d123.geometry.se import StateSE2, StateSE3
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array, bbse3_array_to_corners_array
@@ -137,22 +137,22 @@ def corners_dict(self) -> Dict[Corners2DIndex, Point2D]:
class BoundingBoxSE3(ArrayMixin):
"""
- Rotated bounding box in 3D defined by center (StateSE3), length, width and height.
+    Rotated bounding box in 3D defined by a center pose with quaternion rotation (StateSE3), plus length, width and height.
Example:
>>> from d123.geometry import StateSE3
- >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3), length=4.0, width=2.0, height=1.5)
+ >>> bbox = BoundingBoxSE3(center=StateSE3(1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0), length=4.0, width=2.0, height=1.5)
>>> bbox.array
- array([1. , 2. , 3. , 0.1, 0.2, 0.3, 4. , 2. , 1.5])
+ array([1. , 2. , 3. , 1. , 0. , 0. , 0. , 4. , 2. , 1.5])
>>> bbox.bounding_box_se2.array
- array([1. , 2. , 0.3, 4. , 2. ])
+ array([1., 2., 0., 4., 2.])
>>> bbox.shapely_polygon.area
8.0
"""
_array: npt.NDArray[np.float64]
- def __init__(self, center: EulerStateSE3, length: float, width: float, height: float):
+ def __init__(self, center: StateSE3, length: float, width: float, height: float):
"""Initialize BoundingBoxSE3 with center (StateSE3), length, width and height.
:param center: Center of the bounding box as a StateSE3 instance.
@@ -183,15 +183,15 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> Boundi
return instance
@property
- def center(self) -> EulerStateSE3:
+ def center(self) -> StateSE3:
"""The center of the bounding box as a StateSE3 instance.
:return: The center of the bounding box as a StateSE3 instance.
"""
- return EulerStateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
+ return StateSE3.from_array(self._array[BoundingBoxSE3Index.STATE_SE3])
@property
- def center_se3(self) -> EulerStateSE3:
+ def center_se3(self) -> StateSE3:
"""The center of the bounding box as a StateSE3 instance.
:return: The center of the bounding box as a StateSE3 instance.
@@ -245,9 +245,8 @@ def bounding_box_se2(self) -> BoundingBoxSE2:
:return: A BoundingBoxSE2 instance.
"""
- center_se3 = self.center_se3
return BoundingBoxSE2(
- center=StateSE2(center_se3.x, center_se3.y, center_se3.yaw),
+ center=self.center_se2,
length=self.length,
width=self.width,
)
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index aa0924db..1da7c945 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -183,6 +183,10 @@ def XY(cls) -> slice:
def SE2(cls) -> slice:
return slice(cls.X, cls.YAW + 1)
+ @classproperty
+ def EXTENT(cls) -> slice:
+ return slice(cls.LENGTH, cls.WIDTH + 1)
+
class Corners2DIndex(IntEnum):
"""
@@ -197,19 +201,22 @@ class Corners2DIndex(IntEnum):
class BoundingBoxSE3Index(IntEnum):
"""
- Indexes array-like representations of rotated 3D bounding boxes (x,y,z,roll,pitch,yaw,length,width,height).
- TODO: Use quaternions for rotation.
+    Indexes array-like representations of rotated 3D bounding boxes:
+ - center (x,y,z).
+ - rotation (qw,qx,qy,qz).
+ - extent (length,width,height).
"""
X = 0
Y = 1
Z = 2
- ROLL = 3
- PITCH = 4
- YAW = 5
- LENGTH = 6
- WIDTH = 7
- HEIGHT = 8
+ QW = 3
+ QX = 4
+ QY = 5
+ QZ = 6
+ LENGTH = 7
+ WIDTH = 8
+ HEIGHT = 9
@classproperty
def XYZ(cls) -> slice:
@@ -217,11 +224,11 @@ def XYZ(cls) -> slice:
@classproperty
def STATE_SE3(cls) -> slice:
- return slice(cls.X, cls.YAW + 1)
+ return slice(cls.X, cls.QZ + 1)
@classproperty
- def EULER_ANGLES(cls) -> slice:
- return slice(cls.ROLL, cls.YAW + 1)
+ def QUATERNION(cls) -> slice:
+ return slice(cls.QW, cls.QZ + 1)
@classproperty
def EXTENT(cls) -> slice:
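A small sketch of how the reworked index slices a 10-element bounding-box array (layout taken from the enum above; the values are illustrative):

import numpy as np
from d123.geometry.geometry_index import BoundingBoxSE3Index

bbox = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0, 4.0, 2.0, 1.5])
center_xyz = bbox[BoundingBoxSE3Index.XYZ]         # (1, 2, 3)
quaternion = bbox[BoundingBoxSE3Index.QUATERNION]  # identity (1, 0, 0, 0)
extent = bbox[BoundingBoxSE3Index.EXTENT]          # (4, 2, 1.5)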
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 148868ea..b63bea7d 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -245,6 +245,14 @@ def quaternion(self) -> Quaternion:
"""
return Quaternion.from_array(self.array[StateSE3Index.QUATERNION])
+ @property
+ def euler_angles(self) -> EulerAngles:
+ """Returns the Euler angles (roll, pitch, yaw) representation of the state's orientation.
+
+ :return: An EulerAngles instance representing the Euler angles.
+ """
+ return self.quaternion.euler_angles
+
@property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the state's orientation.
diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py
index 06522d7a..f34639ad 100644
--- a/d123/geometry/test/test_bounding_box.py
+++ b/d123/geometry/test/test_bounding_box.py
@@ -12,6 +12,7 @@
Corners3DIndex,
Point2DIndex,
)
+from d123.geometry.se import StateSE3
class TestBoundingBoxSE2(unittest.TestCase):
@@ -109,7 +110,8 @@ class TestBoundingBoxSE3(unittest.TestCase):
def setUp(self):
"""Set up test fixtures."""
- self.center = EulerStateSE3(1.0, 2.0, 3.0, 0.1, 0.2, 0.3)
+ self.array = np.array([1.0, 2.0, 3.0, 0.98185617, 0.06407135, 0.09115755, 0.1534393, 4.0, 2.0, 1.5])
+ self.center = StateSE3(1.0, 2.0, 3.0, 0.98185617, 0.06407135, 0.09115755, 0.1534393)
self.length = 4.0
self.width = 2.0
self.height = 1.5
@@ -125,13 +127,13 @@ def test_init(self):
def test_from_array(self):
"""Test BoundingBoxSE3.from_array method."""
- array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ array = self.array.copy()
bbox = BoundingBoxSE3.from_array(array)
np.testing.assert_array_equal(bbox.array, array)
def test_from_array_copy(self):
"""Test BoundingBoxSE3.from_array with copy parameter."""
- array = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ array = self.array.copy()
bbox_copy = BoundingBoxSE3.from_array(array, copy=True)
bbox_no_copy = BoundingBoxSE3.from_array(array, copy=False)
@@ -149,14 +151,14 @@ def test_properties(self):
def test_array_property(self):
"""Test array property."""
- expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5])
+ expected = self.array.copy()
np.testing.assert_array_equal(self.bbox.array, expected)
def test_array_mixin(self):
"""Test that BoundingBoxSE3 is an instance of ArrayMixin."""
self.assertIsInstance(self.bbox, ArrayMixin)
- expected = np.array([1.0, 2.0, 3.0, 0.1, 0.2, 0.3, 4.0, 2.0, 1.5], dtype=np.float16)
+ expected = np.array(self.array, dtype=np.float16)
output_array = np.array(self.bbox, dtype=np.float16)
np.testing.assert_array_equal(output_array, expected)
self.assertEqual(output_array.dtype, np.float16)
@@ -170,7 +172,7 @@ def test_bounding_box_se2_property(self):
self.assertEqual(bbox_2d.width, self.width)
self.assertEqual(bbox_2d.center.x, self.center.x)
self.assertEqual(bbox_2d.center.y, self.center.y)
- self.assertEqual(bbox_2d.center.yaw, self.center.yaw)
+ self.assertEqual(bbox_2d.center.yaw, self.center.euler_angles.yaw)
def test_corners_array(self):
"""Test corners_array property."""
@@ -196,15 +198,19 @@ def test_array_assertions(self):
"""Test array assertions in from_array."""
# Test 2D array
with self.assertRaises(AssertionError):
- BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]]))
+ BoundingBoxSE3.from_array(np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]))
- # Test wrong size
+ # Test wrong size, less than required
+ with self.assertRaises(AssertionError):
+ BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]))
+
+ # Test wrong size, greater than required
with self.assertRaises(AssertionError):
- BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8]))
+ BoundingBoxSE3.from_array(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]))
def test_zero_dimensions(self):
"""Test bounding box with zero dimensions."""
- center = EulerStateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
+ center = StateSE3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
bbox = BoundingBoxSE3(center, 0.0, 0.0, 0.0)
self.assertEqual(bbox.length, 0.0)
self.assertEqual(bbox.width, 0.0)
diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py
index faaa3587..c3a5ac6e 100644
--- a/d123/geometry/transform/transform_se2.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -3,7 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.geometry_index import Vector2DIndex
+from d123.geometry.geometry_index import Point2DIndex, Vector2DIndex
from d123.geometry.se import StateSE2, StateSE2Index
from d123.geometry.utils.rotation_utils import normalize_angle
from d123.geometry.vector import Vector2D
@@ -180,3 +180,29 @@ def translate_se2_along_y(state_se2: StateSE2, distance: float) -> StateSE2:
"""
translation = Vector2D.from_array(np.array([0.0, distance], dtype=np.float64))
return StateSE2.from_array(translate_se2_array_along_body_frame(state_se2.array, translation), copy=False)
+
+
+def translate_2d_along_body_frame(
+ points_2d: npt.NDArray[np.float64],
+ yaws: npt.NDArray[np.float64],
+ x_translate: npt.NDArray[np.float64],
+ y_translate: npt.NDArray[np.float64],
+) -> npt.NDArray[np.float64]:
+ """Translate 2D points along their body frame.
+
+ :param points_2d: Array of 2D points, indexed by :class:`~d123.geometry.Point2DIndex`.
+ :param yaws: Array of yaw angles.
+ :param x_translate: Array of x translation, i.e. forward translation.
+ :param y_translate: Array of y translation, i.e. left translation.
+ :return: Array of translated 2D points, indexed by :class:`~d123.geometry.Point2DIndex`.
+ """
+ assert points_2d.shape[-1] == len(Point2DIndex)
+ half_pi = np.pi / 2.0
+ translation: npt.NDArray[np.float64] = np.stack(
+ [
+ (y_translate * np.cos(yaws + half_pi)) + (x_translate * np.cos(yaws)),
+ (y_translate * np.sin(yaws + half_pi)) + (x_translate * np.sin(yaws)),
+ ],
+ axis=-1,
+ )
+ return points_2d + translation
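The shifted-cosine form in translate_2d_along_body_frame is the ordinary body-to-world rotation: since cos(yaw + pi/2) = -sin(yaw) and sin(yaw + pi/2) = cos(yaw), the offset equals R(yaw) @ [x_translate, y_translate]. A standalone check of the identity:

import numpy as np

yaw, x_t, y_t = 0.7, 2.0, -1.0
via_shifted = np.array([
    y_t * np.cos(yaw + np.pi / 2) + x_t * np.cos(yaw),
    y_t * np.sin(yaw + np.pi / 2) + x_t * np.sin(yaw),
])
R = np.array([[np.cos(yaw), -np.sin(yaw)],
              [np.sin(yaw), np.cos(yaw)]])  # 2D rotation matrix
np.testing.assert_allclose(via_shifted, R @ np.array([x_t, y_t]), atol=1e-12)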
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index 4725e4c2..e5132fe2 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -4,10 +4,11 @@
import numpy.typing as npt
from d123.geometry import Vector3D
-from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, Vector3DIndex
+from d123.geometry.geometry_index import Point3DIndex, QuaternionIndex, StateSE3Index, Vector3DIndex
from d123.geometry.se import StateSE3
from d123.geometry.utils.rotation_utils import (
conjugate_quaternion_array,
+ get_rotation_matrices_from_quaternion_array,
get_rotation_matrix_from_quaternion_array,
multiply_quaternion_arrays,
)
@@ -211,3 +212,24 @@ def translate_se3_along_body_frame(state_se3: StateSE3, vector_3d: Vector3D) ->
state_se3_array = state_se3.array.copy()
state_se3_array[StateSE3Index.XYZ] += world_translation
return StateSE3.from_array(state_se3_array, copy=False)
+
+
+def translate_3d_along_body_frame(
+ points_3d: npt.NDArray[np.float64],
+ quaternions: npt.NDArray[np.float64],
+ translation: npt.NDArray[np.float64],
+) -> npt.NDArray[np.float64]:
+ """Translates 3D points along a vector in the body frame defined by quaternions.
+
+ :param points_3d: Array of 3D points, indexed by :class:`~d123.geometry.Point3DIndex`.
+ :param quaternions: Array of quaternions, indexed by :class:`~d123.geometry.QuaternionIndex`.
+ :param translation: Array of translation vectors, indexed by :class:`~d123.geometry.Vector3DIndex`.
+ :return: The translated 3D points in the world frame, indexed by :class:`~d123.geometry.Point3DIndex`.
+ """
+ assert points_3d.shape[-1] == len(Point3DIndex)
+ assert quaternions.shape[-1] == len(QuaternionIndex)
+ assert translation.shape[-1] == len(Vector3DIndex)
+
+ R = get_rotation_matrices_from_quaternion_array(quaternions)
+ world_translation = np.einsum("...ij,...j->...i", R, translation)
+ return points_3d + world_translation
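The quaternion analogue behaves the same way: with an identity rotation the body frame coincides with the world frame, so the translation passes through unchanged. A sketch, assuming the w-first quaternion layout used elsewhere in this series:

    import numpy as np
    from d123.geometry.transform.transform_se3 import translate_3d_along_body_frame

    points = np.array([[1.0, 2.0, 3.0]])
    identity_q = np.array([[1.0, 0.0, 0.0, 0.0]])  # no rotation
    shift = np.array([[0.5, 0.0, 0.0]])            # forward along body x
    out = translate_3d_along_body_frame(points, identity_q, shift)
    # out is approximately [[1.5, 2.0, 3.0]]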
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index ff8bb237..4d3a4cd0 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -6,136 +6,133 @@
BoundingBoxSE2Index,
BoundingBoxSE3Index,
Corners2DIndex,
+ Corners3DIndex,
Point2DIndex,
+ Vector2DIndex,
+ Vector3DIndex,
)
+from d123.geometry.transform.transform_se2 import translate_2d_along_body_frame
+from d123.geometry.transform.transform_se3 import translate_3d_along_body_frame
-def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+def get_corners_2d_factors() -> npt.NDArray[np.float64]:
+ """Returns the factors to compute the corners of a SE2 bounding box in the body frame.
+
+ The factors are defined such that multiplying them with the length and width
+ of the bounding box yields the corner coordinates in the body frame.
+
+ :return: A (4, 2) array, indexed by :class:`~d123.geometry.Corners2DIndex` and
+ :class:`~d123.geometry.Point2DIndex`, respectively.
+ """
+ # NOTE: ISO 8855 convention for rotation
+ factors = np.zeros((len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64)
+ factors.fill(0.5)
+ factors[Corners2DIndex.FRONT_LEFT] *= [+1, +1]
+ factors[Corners2DIndex.FRONT_RIGHT] *= [+1, -1]
+ factors[Corners2DIndex.BACK_RIGHT] *= [-1, -1]
+ factors[Corners2DIndex.BACK_LEFT] *= [-1, +1]
+ return factors
+
+
+def get_corners_3d_factors() -> npt.NDArray[np.float64]:
+ """Returns the factors to compute the corners of a SE3 bounding box in the body frame.
+
+ The factors are defined such that multiplying them with the length, width, and height
+ of the bounding box yields the corner coordinates in the body frame.
+
+ :return: An (8, 3) array, indexed by :class:`~d123.geometry.Corners3DIndex` and
+ :class:`~d123.geometry.Vector3DIndex`, respectively.
"""
- Converts an array of BoundingBoxSE2 objects to a coordinates array.
- :param bbse2: Array of BoundingBoxSE2 objects.
- :return: Coordinates array of shape (n, 5, 2) where n is the number of bounding boxes.
+ # NOTE: ISO 8855 convention for rotation
+ factors = np.zeros((len(Corners3DIndex), len(Vector3DIndex)), dtype=np.float64)
+ factors.fill(0.5)
+ factors[Corners3DIndex.FRONT_LEFT_BOTTOM] *= [+1, +1, -1]
+ factors[Corners3DIndex.FRONT_RIGHT_BOTTOM] *= [+1, -1, -1]
+ factors[Corners3DIndex.BACK_RIGHT_BOTTOM] *= [-1, -1, -1]
+ factors[Corners3DIndex.BACK_LEFT_BOTTOM] *= [-1, +1, -1]
+ factors[Corners3DIndex.FRONT_LEFT_TOP] *= [+1, +1, +1]
+ factors[Corners3DIndex.FRONT_RIGHT_TOP] *= [+1, -1, +1]
+ factors[Corners3DIndex.BACK_RIGHT_TOP] *= [-1, -1, +1]
+ factors[Corners3DIndex.BACK_LEFT_TOP] *= [-1, +1, +1]
+ return factors
+
+
+def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ """Converts an array of BoundingBoxSE2 objects to the 2D coordinates array of their corners.
+
+ :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
+ :return: Coordinates array of shape (..., 4, 2), indexed by
+ :class:`~d123.geometry.Corners2DIndex` and :class:`~d123.geometry.Point2DIndex`, respectively.
"""
assert bbse2.shape[-1] == len(BoundingBoxSE2Index)
ndim_one: bool = bbse2.ndim == 1
if ndim_one:
- bbse2 = bbse2[None, :]
-
- corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64)
+ bbse2 = bbse2[None, ...]
centers = bbse2[..., BoundingBoxSE2Index.XY]
yaws = bbse2[..., BoundingBoxSE2Index.YAW]
- half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0
- half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0
-
- corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(
- centers,
- yaws,
- half_length,
- half_width,
- )
- corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array(
- centers,
- yaws,
- half_length,
- -half_width,
- )
- corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array(
- centers,
- yaws,
- -half_length,
- -half_width,
- )
- corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(
- centers,
- yaws,
- -half_length,
- half_width,
- )
+ extents = bbse2[..., BoundingBoxSE2Index.EXTENT] # (..., 2)
+
+ factors = get_corners_2d_factors() # (4, 2)
+ corner_translation_body = extents[..., None, :] * factors[None, :, :] # (..., 4, 2)
+
+ corners_array = translate_2d_along_body_frame( # (..., 4, 2)
+ points_2d=centers[..., None, :], # (..., 1, 2)
+ yaws=yaws[..., None], # (..., 1)
+ x_translate=corner_translation_body[..., Vector2DIndex.X],
+ y_translate=corner_translation_body[..., Vector2DIndex.Y],
+ ) # (..., 4, 2)
return corners_array.squeeze(axis=0) if ndim_one else corners_array
def corners_2d_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+ """Converts an array of 2D corners to an array of shapely Polygons.
+ TODO: Consider removing this function?
+
+ :param corners_array: Array of shape (..., 4, 2) where 4 is the number of corners.
+ :return: Array of shapely Polygons.
+ """
polygons = shapely.creation.polygons(corners_array)
return polygons
-def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
-
+def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
+ """Converts an array of BoundingBoxSE2 objects to an array of shapely Polygons.
-def translate_along_yaw_array(
- points_2d: npt.NDArray[np.float64],
- headings: npt.NDArray[np.float64],
- lon: npt.NDArray[np.float64],
- lat: npt.NDArray[np.float64],
-) -> npt.NDArray[np.float64]:
- # TODO: move somewhere else
- assert points_2d.shape[-1] == len(Point2DIndex)
- half_pi = np.pi / 2.0
- translation: npt.NDArray[np.float64] = np.stack(
- [
- (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)),
- (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)),
- ],
- axis=-1,
- )
- return points_2d + translation
+ :param bbse2: Array of SE2 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE2Index`.
+ :return: Array of shapely Polygons.
+ """
+ return corners_2d_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- """Converts an array of BoundingBoxSE3 objects to a coordinates array.
- TODO: Fix this function
+ """Converts an array of BoundingBoxSE3 objects to the 3D coordinates array of their corners.
- :param bbse3_array: Array of BoundingBoxSE3 objects, shape (..., 7) [x, y, z, yaw, pitch, roll, length, width, height].
- :return: Coordinates array of shape (..., 8, 3) where 8 is the number of corners.
+ :param bbse3_array: Array of SE3 bounding boxes, indexed by :class:`~d123.geometry.BoundingBoxSE3Index`.
+ :return: Coordinates array of shape (..., 8, 3), indexed by
+ :class:`~d123.geometry.Corners3DIndex` and :class:`~d123.geometry.Point3DIndex`, respectively.
"""
assert bbse3_array.shape[-1] == len(BoundingBoxSE3Index)
+ # Flag whether to unsqueeze the input and squeeze the output dims
ndim_one: bool = bbse3_array.ndim == 1
if ndim_one:
- bbse3_array = bbse3_array[None, :]
+ bbse3_array = bbse3_array[None, ...]
# Extract parameters
centers = bbse3_array[..., BoundingBoxSE3Index.XYZ] # (..., 3)
- yaws = bbse3_array[..., BoundingBoxSE3Index.YAW] # (...,)
- pitches = bbse3_array[..., BoundingBoxSE3Index.PITCH] # (...,)
- rolls = bbse3_array[..., BoundingBoxSE3Index.ROLL] # (...,)
-
- # Corner factors: (x, y, z) in box frame
- factors = np.array(
- [
- [+0.5, -0.5, -0.5], # FRONT_LEFT_BOTTOM
- [+0.5, +0.5, -0.5], # FRONT_RIGHT_BOTTOM
- [-0.5, +0.5, -0.5], # BACK_RIGHT_BOTTOM
- [-0.5, -0.5, -0.5], # BACK_LEFT_BOTTOM
- [+0.5, -0.5, +0.5], # FRONT_LEFT_TOP
- [+0.5, +0.5, +0.5], # FRONT_RIGHT_TOP
- [-0.5, +0.5, +0.5], # BACK_RIGHT_TOP
- [-0.5, -0.5, +0.5], # BACK_LEFT_TOP
- ],
- dtype=np.float64,
- ) # (8, 3)
+ quaternions = bbse3_array[..., BoundingBoxSE3Index.QUATERNION] # (..., 4)
# Box extents
- extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT] # (...,)
- corners_local = factors[None, :, :] * extents # (..., 8, 3)
-
- # Rotation matrices (yaw, pitch, roll)
- def rotation_matrix(yaw, pitch, roll):
- cy, sy = np.cos(yaw), np.sin(yaw)
- cp, sp = np.cos(pitch), np.sin(pitch)
- cr, sr = np.cos(roll), np.sin(roll)
- Rz = np.array([[cy, -sy, 0], [sy, cy, 0], [0, 0, 1]])
- Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
- Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
- return Rz @ Ry @ Rx
-
- corners_world = np.empty((*bbse3_array.shape[:-1], 8, 3), dtype=np.float64)
- for idx in np.ndindex(bbse3_array.shape[:-1]):
- R = rotation_matrix(yaws[idx], pitches[idx], rolls[idx])
- corners_world[idx] = centers[idx] + (corners_local[idx] @ R.T)
+ factors = get_corners_3d_factors() # (8, 3)
+ extents = bbse3_array[..., BoundingBoxSE3Index.EXTENT] # (..., 3)
+ corner_translation_body = extents[..., None, :] * factors[None, :, :] # (..., 8, 3)
+ corners_world = translate_3d_along_body_frame(
+ centers[..., None, :], # (..., 1, 3)
+ quaternions[..., None, :], # (..., 1, 4)
+ corner_translation_body,
+ )
return corners_world.squeeze(axis=0) if ndim_one else corners_world
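The factor-based corner computation can be checked against an axis-aligned box, mirroring the unit tests added later in this series:

    import numpy as np
    from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array

    # [x, y, yaw, length, width]: a 4 m x 2 m box centred at (1, 2), yaw 0.
    corners = bbse2_array_to_corners_array(np.array([1.0, 2.0, 0.0, 4.0, 2.0]))
    # Shape (4, 2); front-left corner at (3, 3), back-right at (-1, 1).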
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index 871ec29c..ead501b1 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -39,12 +39,21 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
Convention: Intrinsic rotations in order Z-Y-X (yaw, pitch, roll)
Equivalent to: R = R_x(roll) @ R_y(pitch) @ R_z(yaw)
"""
- assert euler_angles_array.ndim == 2 and euler_angles_array.shape[1] == len(EulerAnglesIndex)
+ assert euler_angles_array.ndim >= 1 and euler_angles_array.shape[-1] == len(EulerAnglesIndex)
+
+ # Store original shape for reshaping later
+ original_shape = euler_angles_array.shape[:-1]
+
+ # Flatten to 2D if needed
+ if euler_angles_array.ndim > 2:
+ euler_angles_array_ = euler_angles_array.reshape(-1, len(EulerAnglesIndex))
+ else:
+ euler_angles_array_ = euler_angles_array
# Extract roll, pitch, yaw for all samples at once
- roll = euler_angles_array[:, EulerAnglesIndex.ROLL]
- pitch = euler_angles_array[:, EulerAnglesIndex.PITCH]
- yaw = euler_angles_array[:, EulerAnglesIndex.YAW]
+ roll = euler_angles_array_[:, EulerAnglesIndex.ROLL]
+ pitch = euler_angles_array_[:, EulerAnglesIndex.PITCH]
+ yaw = euler_angles_array_[:, EulerAnglesIndex.YAW]
# Compute sin/cos for all angles at once
cos_roll, sin_roll = np.cos(roll), np.sin(roll)
@@ -52,7 +61,7 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
cos_yaw, sin_yaw = np.cos(yaw), np.sin(yaw)
# Build rotation matrices for entire batch
- batch_size = euler_angles_array.shape[0]
+ batch_size = euler_angles_array_.shape[0]
rotation_matrices = np.zeros((batch_size, 3, 3), dtype=np.float64)
# ZYX Tait-Bryan rotation matrix elements
@@ -68,6 +77,10 @@ def get_rotation_matrices_from_euler_array(euler_angles_array: npt.NDArray[np.fl
rotation_matrices[:, 2, 1] = cos_roll * sin_pitch * sin_yaw + sin_roll * cos_yaw
rotation_matrices[:, 2, 2] = cos_roll * cos_pitch
+ # Reshape back to original batch dimensions + (3, 3)
+ if len(original_shape) > 1:
+ rotation_matrices = rotation_matrices.reshape(original_shape + (3, 3))
+
return rotation_matrices
@@ -140,12 +153,28 @@ def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64])
def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- assert quaternion_array.ndim == 2 and quaternion_array.shape[-1] == len(QuaternionIndex)
- norm_quaternion = normalize_quaternion_array(quaternion_array)
+ assert quaternion_array.ndim >= 1 and quaternion_array.shape[-1] == len(QuaternionIndex)
+
+ # Store original shape for reshaping later
+ original_shape = quaternion_array.shape[:-1]
+
+ # Flatten to 2D if needed
+ if quaternion_array.ndim > 2:
+ quaternion_array_ = quaternion_array.reshape(-1, len(QuaternionIndex))
+ else:
+ quaternion_array_ = quaternion_array
+
+ norm_quaternion = normalize_quaternion_array(quaternion_array_)
Q_matrices = get_q_matrices(norm_quaternion)
Q_bar_matrices = get_q_bar_matrices(norm_quaternion)
rotation_matrix = batch_matmul(Q_matrices, Q_bar_matrices.conj().swapaxes(-1, -2))
- return rotation_matrix[:, 1:][:, :, 1:]
+ rotation_matrix = rotation_matrix[:, 1:][:, :, 1:]
+
+ # Reshape back to original batch dimensions + (3, 3)
+ if len(original_shape) > 1:
+ rotation_matrix = rotation_matrix.reshape(original_shape + (3, 3))
+
+ return rotation_matrix
def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
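Both batched conversions now accept arbitrary leading dimensions and preserve them in the output. A shape-only sketch (the w-first identity quaternion is an assumption consistent with the tests in this series):

    import numpy as np
    from d123.geometry.utils.rotation_utils import get_rotation_matrices_from_quaternion_array

    quats = np.zeros((5, 7, 4))
    quats[..., 0] = 1.0  # identity rotations
    R = get_rotation_matrices_from_quaternion_array(quats)
    assert R.shape == (5, 7, 3, 3)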
diff --git a/d123/geometry/utils/test/__init__.py b/d123/geometry/utils/test/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/utils/test/test_bounding_box_utils.py b/d123/geometry/utils/test/test_bounding_box_utils.py
new file mode 100644
index 00000000..1099bc6f
--- /dev/null
+++ b/d123/geometry/utils/test/test_bounding_box_utils.py
@@ -0,0 +1,269 @@
+import unittest
+
+import numpy as np
+import numpy.typing as npt
+import shapely
+
+from d123.geometry.geometry_index import (
+ BoundingBoxSE3Index,
+ Corners2DIndex,
+ Corners3DIndex,
+ Point2DIndex,
+ Point3DIndex,
+ StateSE3Index,
+ EulerStateSE3Index,
+)
+from d123.geometry.point import Point3D
+from d123.geometry.rotation import Quaternion
+from d123.geometry.se import EulerStateSE3, StateSE3
+from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
+from d123.geometry.utils.bounding_box_utils import (
+ bbse2_array_to_corners_array,
+ corners_2d_array_to_polygon_array,
+ bbse2_array_to_polygon_array,
+ bbse3_array_to_corners_array,
+ get_corners_3d_factors,
+)
+from d123.geometry.vector import Vector3D
+
+
+class TestBoundingBoxUtils(unittest.TestCase):
+
+ def setUp(self):
+ self._num_consistency_checks = 10
+ self._max_pose_xyz = 100.0
+ self._max_extent = 200.0
+
+ def _get_random_euler_se3_array(self, size: int) -> npt.NDArray[np.float64]:
+ """Generate random SE3 poses"""
+ random_se3_array = np.zeros((size, len(EulerStateSE3Index)), dtype=np.float64)
+ random_se3_array[:, EulerStateSE3Index.XYZ] = np.random.uniform(
+ -self._max_pose_xyz,
+ self._max_pose_xyz,
+ (size, len(Point3DIndex)),
+ )
+ random_se3_array[:, EulerStateSE3Index.YAW] = np.random.uniform(-np.pi, np.pi, size)
+ random_se3_array[:, EulerStateSE3Index.PITCH] = np.random.uniform(-np.pi / 2, np.pi / 2, size)
+ random_se3_array[:, EulerStateSE3Index.ROLL] = np.random.uniform(-np.pi, np.pi, size)
+
+ return random_se3_array
+
+ def test_bbse2_array_to_corners_array_one_dim(self):
+ bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0])
+ corners_array = bbse2_array_to_corners_array(bounding_box_se2_array)
+
+ # fill expected
+ expected_corners = np.zeros((4, 2), dtype=np.float64)
+ expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0]
+ expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0]
+
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+ def test_bbse2_array_to_corners_array_n_dim(self):
+ bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0])
+ bounding_box_se2_array = np.tile(bounding_box_se2_array, (3, 1))
+
+ corners_array = bbse2_array_to_corners_array(bounding_box_se2_array)
+
+ # fill expected
+ expected_corners = np.zeros((4, 2), dtype=np.float64)
+ expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0]
+ expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0]
+ expected_corners = np.tile(expected_corners, (3, 1, 1))
+
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+ def test_bbse2_array_to_corners_array_zero_dim(self):
+ bounding_box_se2_array = np.zeros((0, 5), dtype=np.float64)
+ corners_array = bbse2_array_to_corners_array(bounding_box_se2_array)
+ expected_corners = np.zeros((0, 4, 2), dtype=np.float64)
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+ def test_bbse2_array_to_corners_array_rotation(self):
+ bounding_box_se2_array = np.array([1.0, 2.0, np.pi / 2, 4.0, 2.0])
+ corners_array = bbse2_array_to_corners_array(bounding_box_se2_array)
+
+ # fill expected
+ expected_corners = np.zeros((len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64)
+ expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 - 1.0, 2.0 + 2.0]
+ expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 1.0, 2.0 + 2.0]
+ expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 + 1.0, 2.0 - 2.0]
+ expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 1.0, 2.0 - 2.0]
+
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+ def test_corners_2d_array_to_polygon_array_one_dim(self):
+ corners_array = np.array(
+ [
+ [3.0, 3.0],
+ [3.0, 1.0],
+ [-1.0, 1.0],
+ [-1.0, 3.0],
+ ]
+ )
+ polygon = corners_2d_array_to_polygon_array(corners_array)
+
+ expected_polygon = shapely.geometry.Polygon(corners_array)
+ np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6)
+ self.assertTrue(polygon.equals(expected_polygon))
+
+ def test_corners_2d_array_to_polygon_array_n_dim(self):
+ corners_array = np.array(
+ [
+ [
+ [3.0, 3.0],
+ [3.0, 1.0],
+ [-1.0, 1.0],
+ [-1.0, 3.0],
+ ],
+ [
+ [4.0, 4.0],
+ [4.0, 2.0],
+ [0.0, 2.0],
+ [0.0, 4.0],
+ ],
+ ]
+ )
+ polygons = corners_2d_array_to_polygon_array(corners_array)
+
+ expected_polygon_1 = shapely.geometry.Polygon(corners_array[0])
+ expected_polygon_2 = shapely.geometry.Polygon(corners_array[1])
+
+ np.testing.assert_allclose(polygons[0].area, expected_polygon_1.area, atol=1e-6)
+ self.assertTrue(polygons[0].equals(expected_polygon_1))
+
+ np.testing.assert_allclose(polygons[1].area, expected_polygon_2.area, atol=1e-6)
+ self.assertTrue(polygons[1].equals(expected_polygon_2))
+
+ def test_corners_2d_array_to_polygon_array_zero_dim(self):
+ corners_array = np.zeros((0, 4, 2), dtype=np.float64)
+ polygons = corners_2d_array_to_polygon_array(corners_array)
+ expected_polygons = np.zeros((0,), dtype=np.object_)
+ np.testing.assert_array_equal(polygons, expected_polygons)
+
+ def test_bbse2_array_to_polygon_array_one_dim(self):
+ bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0])
+ polygon = bbse2_array_to_polygon_array(bounding_box_se2_array)
+
+ expected_corners = np.zeros((4, 2), dtype=np.float64)
+ expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0]
+ expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0]
+ expected_polygon = shapely.geometry.Polygon(expected_corners)
+
+ np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6)
+ self.assertTrue(polygon.equals(expected_polygon))
+
+ def test_bbse2_array_to_polygon_array_n_dim(self):
+ bounding_box_se2_array = np.array([1.0, 2.0, 0.0, 4.0, 2.0])
+ bounding_box_se2_array = np.tile(bounding_box_se2_array, (3, 1))
+
+ polygons = bbse2_array_to_polygon_array(bounding_box_se2_array)
+
+ expected_corners = np.zeros((4, 2), dtype=np.float64)
+ expected_corners[Corners2DIndex.FRONT_LEFT] = [1.0 + 2.0, 2.0 + 1.0]
+ expected_corners[Corners2DIndex.FRONT_RIGHT] = [1.0 + 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_RIGHT] = [1.0 - 2.0, 2.0 - 1.0]
+ expected_corners[Corners2DIndex.BACK_LEFT] = [1.0 - 2.0, 2.0 + 1.0]
+ expected_polygon = shapely.geometry.Polygon(expected_corners)
+
+ for polygon in polygons:
+ np.testing.assert_allclose(polygon.area, expected_polygon.area, atol=1e-6)
+ self.assertTrue(polygon.equals(expected_polygon))
+
+ def test_bbse2_array_to_polygon_array_zero_dim(self):
+ bounding_box_se2_array = np.zeros((0, 5), dtype=np.float64)
+ polygons = bbse2_array_to_polygon_array(bounding_box_se2_array)
+ expected_polygons = np.zeros((0,), dtype=np.object_)
+ np.testing.assert_array_equal(polygons, expected_polygons)
+
+ def test_bbse3_array_to_corners_array_one_dim(self):
+ bounding_box_se3_array = np.array([1.0, 2.0, 3.0, 1.0, 0.0, 0.0, 0.0, 4.0, 2.0, 6.0])
+ corners_array = bbse3_array_to_corners_array(bounding_box_se3_array)
+
+ # fill expected
+ expected_corners = np.zeros((8, 3), dtype=np.float64)
+ expected_corners[Corners3DIndex.FRONT_LEFT_BOTTOM] = [1.0 + 2.0, 2.0 + 1.0, 3.0 - 3.0]
+ expected_corners[Corners3DIndex.FRONT_RIGHT_BOTTOM] = [1.0 + 2.0, 2.0 - 1.0, 3.0 - 3.0]
+ expected_corners[Corners3DIndex.BACK_RIGHT_BOTTOM] = [1.0 - 2.0, 2.0 - 1.0, 3.0 - 3.0]
+ expected_corners[Corners3DIndex.BACK_LEFT_BOTTOM] = [1.0 - 2.0, 2.0 + 1.0, 3.0 - 3.0]
+ expected_corners[Corners3DIndex.FRONT_LEFT_TOP] = [1.0 + 2.0, 2.0 + 1.0, 3.0 + 3.0]
+ expected_corners[Corners3DIndex.FRONT_RIGHT_TOP] = [1.0 + 2.0, 2.0 - 1.0, 3.0 + 3.0]
+ expected_corners[Corners3DIndex.BACK_RIGHT_TOP] = [1.0 - 2.0, 2.0 - 1.0, 3.0 + 3.0]
+ expected_corners[Corners3DIndex.BACK_LEFT_TOP] = [1.0 - 2.0, 2.0 + 1.0, 3.0 + 3.0]
+
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+ def test_bbse3_array_to_corners_array_one_dim_rotation(self):
+ for _ in range(self._num_consistency_checks):
+ se3_state = EulerStateSE3.from_array(self._get_random_euler_se3_array(1)[0]).quaternion_se3
+ se3_array = se3_state.array
+
+ # construct a bounding box
+ bounding_box_se3_array = np.zeros((len(BoundingBoxSE3Index),), dtype=np.float64)
+ length, width, height = np.random.uniform(0.0, self._max_extent, size=3)
+
+ bounding_box_se3_array[BoundingBoxSE3Index.STATE_SE3] = se3_array
+ bounding_box_se3_array[BoundingBoxSE3Index.LENGTH] = length
+ bounding_box_se3_array[BoundingBoxSE3Index.WIDTH] = width
+ bounding_box_se3_array[BoundingBoxSE3Index.HEIGHT] = height
+
+ corners_array = bbse3_array_to_corners_array(bounding_box_se3_array)
+
+ corners_3d_factors = get_corners_3d_factors()
+ for corner_idx in Corners3DIndex:
+ body_translate_vector = Vector3D.from_array(
+ corners_3d_factors[corner_idx] * bounding_box_se3_array[BoundingBoxSE3Index.EXTENT]
+ )
+ np.testing.assert_allclose(
+ corners_array[corner_idx],
+ translate_se3_along_body_frame(se3_state, body_translate_vector).point_3d.array,
+ atol=1e-6,
+ )
+
+ def test_bbse3_array_to_corners_array_n_dim(self):
+ for _ in range(self._num_consistency_checks):
+ N = np.random.randint(1, 20)
+ se3_array = self._get_random_euler_se3_array(N)
+ se3_state_array = np.array([EulerStateSE3.from_array(arr).quaternion_se3.array for arr in se3_array])
+
+ # construct a bounding box
+ bounding_box_se3_array = np.zeros((N, len(BoundingBoxSE3Index)), dtype=np.float64)
+ lengths, widths, heights = np.random.uniform(0.0, self._max_extent, size=(3, N))
+
+ bounding_box_se3_array[:, BoundingBoxSE3Index.STATE_SE3] = se3_state_array
+ bounding_box_se3_array[:, BoundingBoxSE3Index.LENGTH] = lengths
+ bounding_box_se3_array[:, BoundingBoxSE3Index.WIDTH] = widths
+ bounding_box_se3_array[:, BoundingBoxSE3Index.HEIGHT] = heights
+
+ corners_array = bbse3_array_to_corners_array(bounding_box_se3_array)
+
+ corners_3d_factors = get_corners_3d_factors()
+ for obj_idx in range(N):
+ for corner_idx in Corners3DIndex:
+ body_translate_vector = Vector3D.from_array(
+ corners_3d_factors[corner_idx] * bounding_box_se3_array[obj_idx, BoundingBoxSE3Index.EXTENT]
+ )
+ np.testing.assert_allclose(
+ corners_array[obj_idx, corner_idx],
+ translate_se3_along_body_frame(
+ StateSE3.from_array(bounding_box_se3_array[obj_idx, BoundingBoxSE3Index.STATE_SE3]),
+ body_translate_vector,
+ ).point_3d.array,
+ atol=1e-6,
+ )
+
+ def test_bbse3_array_to_corners_array_zero_dim(self):
+ bounding_box_se3_array = np.zeros((0, len(BoundingBoxSE3Index)), dtype=np.float64)
+ corners_array = bbse3_array_to_corners_array(bounding_box_se3_array)
+ expected_corners = np.zeros((0, 8, 3), dtype=np.float64)
+ np.testing.assert_allclose(corners_array, expected_corners, atol=1e-6)
+
+
+if __name__ == "__main__":
+ unittest.main()
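The suite is self-contained and can be run on its own:

    python -m unittest d123.geometry.utils.test.test_bounding_box_utils -v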
diff --git a/d123/geometry/utils/test/test_polyline_utils.py b/d123/geometry/utils/test/test_polyline_utils.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/utils/test/test_rotation_utils.py b/d123/geometry/utils/test/test_rotation_utils.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/geometry/utils/utils.py b/d123/geometry/utils/utils.py
deleted file mode 100644
index b1ce46b2..00000000
--- a/d123/geometry/utils/utils.py
+++ /dev/null
@@ -1,63 +0,0 @@
-import numpy as np
-import numpy.typing as npt
-import shapely
-
-from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, Point2DIndex
-
-
-def bbse2_array_to_corners_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- """
- Converts an array of BoundingBoxSE2 objects to a coordinates array.
- :param bbse2: Array of BoundingBoxSE2 objects.
- :return: Coordinates array of shape (n, 5, 2) where n is the number of bounding boxes.
- """
- assert bbse2.shape[-1] == len(BoundingBoxSE2Index)
-
- ndim_one: bool = bbse2.ndim == 1
- if ndim_one:
- bbse2 = bbse2[None, :]
-
- corners_array = np.zeros((*bbse2.shape[:-1], len(Corners2DIndex), len(Point2DIndex)), dtype=np.float64)
-
- centers = bbse2[..., BoundingBoxSE2Index.XY]
- yaws = bbse2[..., BoundingBoxSE2Index.YAW]
- half_length = bbse2[..., BoundingBoxSE2Index.LENGTH] / 2.0
- half_width = bbse2[..., BoundingBoxSE2Index.WIDTH] / 2.0
-
- corners_array[..., Corners2DIndex.FRONT_LEFT, :] = translate_along_yaw_array(centers, yaws, half_length, half_width)
- corners_array[..., Corners2DIndex.FRONT_RIGHT, :] = translate_along_yaw_array(
- centers, yaws, half_length, -half_width
- )
- corners_array[..., Corners2DIndex.BACK_RIGHT, :] = translate_along_yaw_array(
- centers, yaws, -half_length, -half_width
- )
- corners_array[..., Corners2DIndex.BACK_LEFT, :] = translate_along_yaw_array(centers, yaws, -half_length, half_width)
-
- return corners_array.squeeze(axis=0) if ndim_one else corners_array
-
-
-def corners_array_to_polygon_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.object_]:
- polygons = shapely.creation.polygons(corners_array)
- return polygons
-
-
-def bbse2_array_to_polygon_array(bbse2: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- return corners_array_to_polygon_array(bbse2_array_to_corners_array(bbse2))
-
-
-def translate_along_yaw_array(
- points_2d: npt.NDArray[np.float64],
- headings: npt.NDArray[np.float64],
- lon: npt.NDArray[np.float64],
- lat: npt.NDArray[np.float64],
-) -> npt.NDArray[np.float64]:
- assert points_2d.shape[-1] == len(Point2DIndex)
- half_pi = np.pi / 2.0
- translation: npt.NDArray[np.float64] = np.stack(
- [
- (lat * np.cos(headings + half_pi)) + (lon * np.cos(headings)),
- (lat * np.sin(headings + half_pi)) + (lon * np.sin(headings)),
- ],
- axis=-1,
- )
- return points_2d + translation
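For callers of the deleted module, the same functionality now lives in the modules introduced above; the yaw-based helper was generalised into translate_2d_along_body_frame. A migration sketch:

    # Previously: from d123.geometry.utils.utils import ...
    from d123.geometry.utils.bounding_box_utils import (
        bbse2_array_to_corners_array,
        bbse2_array_to_polygon_array,
        corners_2d_array_to_polygon_array,  # was corners_array_to_polygon_array
    )
    from d123.geometry.transform.transform_se2 import translate_2d_along_body_frame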
From f6c842e5315a763ce080fef5ac709be631526825 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 15:27:46 +0200
Subject: [PATCH 040/145] Run pre-commit file & code formatting
---
d123/common/datatypes/detection/detection.py | 2 +-
.../datatypes/vehicle_state/ego_state.py | 2 +-
.../vehicle_state/vehicle_parameters.py | 4 ++--
d123/common/visualization/matplotlib/utils.py | 2 +-
d123/common/visualization/viser/utils.py | 2 +-
.../av2/av2_data_converter.py | 5 ++++-
.../dataset_specific/nuplan/load_sensor.py | 4 +---
.../nuplan/nuplan_data_converter.py | 1 -
.../wopd/wopd_data_converter.py | 3 +--
d123/geometry/__init__.py | 10 ++++-----
d123/geometry/se.py | 2 +-
d123/geometry/test/test_bounding_box.py | 2 +-
.../test/test_transform_consistency.py | 22 +++++++++----------
.../test/test_transform_euler_se3.py | 4 ++--
.../transform/test/test_transform_se3.py | 6 ++---
d123/geometry/utils/rotation_utils.py | 4 ++--
.../utils/test/test_bounding_box_utils.py | 7 ++----
17 files changed, 39 insertions(+), 43 deletions(-)
diff --git a/d123/common/datatypes/detection/detection.py b/d123/common/datatypes/detection/detection.py
index ff18a561..8ada50fe 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/common/datatypes/detection/detection.py
@@ -7,7 +7,7 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.time.time_point import TimePoint
from d123.common.utils.enums import SerialIntEnum
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, EulerStateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D
@dataclass
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/common/datatypes/vehicle_state/ego_state.py
index 1d378fcd..79522517 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/common/datatypes/vehicle_state/ego_state.py
@@ -23,7 +23,7 @@
rear_axle_se3_to_center_se3,
)
from d123.common.utils.enums import classproperty
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, EulerStateSE3, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D
# TODO: Find an appropriate way to handle SE2 and SE3 states.
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
index 4698206b..5bf38706 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/common/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
from dataclasses import dataclass
-from d123.geometry import StateSE2, EulerStateSE3, Vector2D, Vector3D
-from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
+from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D
from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
+from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
# TODO: Add more vehicle parameters, potentially extend the parameters.
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 34cc2819..1742a864 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -9,7 +9,7 @@
from matplotlib.path import Path
from d123.common.visualization.color.config import PlotConfig
-from d123.geometry import StateSE2, EulerStateSE3
+from d123.geometry import EulerStateSE3, StateSE2
def add_shapely_polygon_to_ax(
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 18af79d3..1ef2ca18 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -15,7 +15,7 @@
from d123.dataset.maps.abstract_map import MapLayer
from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, EulerStateSE3
+from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/dataset/dataset_specific/av2/av2_data_converter.py
index d32e18e5..ffa45db3 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/dataset/dataset_specific/av2/av2_data_converter.py
@@ -35,7 +35,10 @@
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array, get_rotation_matrix
+from d123.geometry.transform.transform_euler_se3 import (
+ convert_relative_to_absolute_euler_se3_array,
+ get_rotation_matrix,
+)
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/dataset/dataset_specific/nuplan/load_sensor.py
index 3e0033b3..f7dd16e8 100644
--- a/d123/dataset/dataset_specific/nuplan/load_sensor.py
+++ b/d123/dataset/dataset_specific/nuplan/load_sensor.py
@@ -1,9 +1,8 @@
import io
from pathlib import Path
-
-from d123.common.utils.dependencies import check_dependencies
from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+from d123.common.utils.dependencies import check_dependencies
check_dependencies(["nuplan"], "nuplan")
from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
@@ -14,4 +13,3 @@ def load_nuplan_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -
with open(filepath, "rb") as fp:
buffer = io.BytesIO(fp.read())
return LiDAR(metadata=lidar_metadata, point_cloud=LidarPointCloud.from_buffer(buffer, "pcd").points)
-
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
index a87d0c31..64ba1c72 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
@@ -12,7 +12,6 @@
import yaml
from pyquaternion import Quaternion
-
import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils
from d123.common.datatypes.detection.detection import TrafficLightStatus
from d123.common.datatypes.detection.detection_types import DetectionType
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
index 1fd26b6b..d74a7ea8 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
@@ -12,7 +12,6 @@
import pyarrow as pa
from pyquaternion import Quaternion
-
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
@@ -26,7 +25,7 @@
from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.geometry import BoundingBoxSE3Index, Point3D, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index 68721954..d845b66c 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -1,18 +1,18 @@
+from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
from d123.geometry.geometry_index import (
BoundingBoxSE2Index,
BoundingBoxSE3Index,
Corners2DIndex,
Corners3DIndex,
+ EulerStateSE3Index,
Point2DIndex,
Point3DIndex,
StateSE2Index,
- EulerStateSE3Index,
Vector2DIndex,
Vector3DIndex,
)
+from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.point import Point2D, Point3D
-from d123.geometry.vector import Vector2D, Vector3D
-from d123.geometry.se import StateSE2, StateSE3, EulerStateSE3
from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
-from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
-from d123.geometry.occupancy_map import OccupancyMap2D
+from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3
+from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index b63bea7d..6538bfe9 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -8,7 +8,7 @@
from pyparsing import cached_property
from d123.common.utils.mixin import ArrayMixin
-from d123.geometry.geometry_index import Point3DIndex, StateSE3Index, StateSE2Index, EulerStateSE3Index
+from d123.geometry.geometry_index import EulerStateSE3Index, Point3DIndex, StateSE2Index, StateSE3Index
from d123.geometry.point import Point2D, Point3D
from d123.geometry.rotation import EulerAngles, Quaternion
diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py
index f34639ad..e8f44986 100644
--- a/d123/geometry/test/test_bounding_box.py
+++ b/d123/geometry/test/test_bounding_box.py
@@ -4,7 +4,7 @@
import shapely.geometry as geom
from d123.common.utils.mixin import ArrayMixin
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, EulerStateSE3
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2
from d123.geometry.geometry_index import (
BoundingBoxSE2Index,
BoundingBoxSE3Index,
diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py
index b4f73ab1..1865a3ff 100644
--- a/d123/geometry/transform/test/test_transform_consistency.py
+++ b/d123/geometry/transform/test/test_transform_consistency.py
@@ -3,8 +3,17 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.geometry_index import Point2DIndex, Point3DIndex, StateSE2Index, EulerStateSE3Index
-from d123.geometry.se import StateSE2, EulerStateSE3
+from d123.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index
+from d123.geometry.se import EulerStateSE3, StateSE2
+from d123.geometry.transform.transform_euler_se3 import (
+ convert_absolute_to_relative_euler_se3_array,
+ convert_absolute_to_relative_points_3d_array,
+ convert_relative_to_absolute_euler_se3_array,
+ convert_relative_to_absolute_points_3d_array,
+ translate_euler_se3_along_body_frame,
+ translate_euler_se3_along_x,
+ translate_euler_se3_along_y,
+)
from d123.geometry.transform.transform_se2 import (
convert_absolute_to_relative_point_2d_array,
convert_absolute_to_relative_se2_array,
@@ -15,15 +24,6 @@
translate_se2_along_y,
translate_se2_array_along_body_frame,
)
-from d123.geometry.transform.transform_euler_se3 import (
- convert_absolute_to_relative_points_3d_array,
- convert_absolute_to_relative_euler_se3_array,
- convert_relative_to_absolute_points_3d_array,
- convert_relative_to_absolute_euler_se3_array,
- translate_euler_se3_along_body_frame,
- translate_euler_se3_along_x,
- translate_euler_se3_along_y,
-)
from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/d123/geometry/transform/test/test_transform_euler_se3.py
index b40ab4c7..f63bc3bd 100644
--- a/d123/geometry/transform/test/test_transform_euler_se3.py
+++ b/d123/geometry/transform/test/test_transform_euler_se3.py
@@ -5,10 +5,10 @@
from d123.geometry.se import EulerStateSE3
from d123.geometry.transform.transform_euler_se3 import (
- convert_absolute_to_relative_points_3d_array,
convert_absolute_to_relative_euler_se3_array,
- convert_relative_to_absolute_points_3d_array,
+ convert_absolute_to_relative_points_3d_array,
convert_relative_to_absolute_euler_se3_array,
+ convert_relative_to_absolute_points_3d_array,
translate_euler_se3_along_body_frame,
translate_euler_se3_along_x,
translate_euler_se3_along_y,
diff --git a/d123/geometry/transform/test/test_transform_se3.py b/d123/geometry/transform/test/test_transform_se3.py
index d7c077ea..0a752f68 100644
--- a/d123/geometry/transform/test/test_transform_se3.py
+++ b/d123/geometry/transform/test/test_transform_se3.py
@@ -3,7 +3,8 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.geometry_index import StateSE3Index, EulerStateSE3Index
+import d123.geometry.transform.transform_euler_se3 as euler_transform_se3
+from d123.geometry.geometry_index import EulerStateSE3Index, StateSE3Index
from d123.geometry.point import Point3D
from d123.geometry.rotation import Quaternion
from d123.geometry.se import EulerStateSE3, StateSE3
@@ -12,12 +13,11 @@
convert_absolute_to_relative_se3_array,
convert_relative_to_absolute_points_3d_array,
convert_relative_to_absolute_se3_array,
+ translate_se3_along_body_frame,
translate_se3_along_x,
translate_se3_along_y,
translate_se3_along_z,
- translate_se3_along_body_frame,
)
-import d123.geometry.transform.transform_euler_se3 as euler_transform_se3
from d123.geometry.utils.rotation_utils import (
get_rotation_matrices_from_euler_array,
get_rotation_matrices_from_quaternion_array,
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index ead501b1..3d3248ce 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -3,10 +3,10 @@
import numpy as np
import numpy.typing as npt
-# import pyquaternion
-
from d123.geometry.geometry_index import EulerAnglesIndex, QuaternionIndex
+# import pyquaternion
+
def batch_matmul(A: npt.NDArray[np.float64], B: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
"""Batch matrix multiplication for arrays of matrices.
diff --git a/d123/geometry/utils/test/test_bounding_box_utils.py b/d123/geometry/utils/test/test_bounding_box_utils.py
index 1099bc6f..3bba0330 100644
--- a/d123/geometry/utils/test/test_bounding_box_utils.py
+++ b/d123/geometry/utils/test/test_bounding_box_utils.py
@@ -8,20 +8,17 @@
BoundingBoxSE3Index,
Corners2DIndex,
Corners3DIndex,
+ EulerStateSE3Index,
Point2DIndex,
Point3DIndex,
- StateSE3Index,
- EulerStateSE3Index,
)
-from d123.geometry.point import Point3D
-from d123.geometry.rotation import Quaternion
from d123.geometry.se import EulerStateSE3, StateSE3
from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
from d123.geometry.utils.bounding_box_utils import (
bbse2_array_to_corners_array,
- corners_2d_array_to_polygon_array,
bbse2_array_to_polygon_array,
bbse3_array_to_corners_array,
+ corners_2d_array_to_polygon_array,
get_corners_3d_factors,
)
from d123.geometry.vector import Vector3D
From 6abde9e867ff06e3c09b04a391c56bc7bca7c7a9 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 20:09:17 +0200
Subject: [PATCH 041/145] Very large refactoring of folder structure and
imports (#39)
---
.../datatypes/recording/abstract_recording.py | 23 -----------
.../recording/detection_recording.py | 15 -------
.../utils/arrow_helper.py} | 0
d123/common/utils/timer.py | 7 +---
d123/common/visualization/color/default.py | 6 +--
.../visualization/matplotlib/observation.py | 8 ++--
d123/common/visualization/matplotlib/plots.py | 2 +-
d123/common/visualization/viser/server.py | 2 +-
d123/common/visualization/viser/utils.py | 6 +--
d123/common/visualization/viser/utils_v2.py | 2 +-
d123/dataset/arrow/__init__.py | 2 -
d123/dataset/logs/__init__.py | 0
d123/dataset/logs/log_metadata.py | 21 ----------
d123/dataset/maps/gpkg/__init__.py | 0
d123/dataset/scene/__init__.py | 0
.../detection => datasets}/__init__.py | 0
.../av2/av2_constants.py | 6 +--
.../av2/av2_data_converter.py | 26 ++++++------
.../av2/av2_helper.py | 2 +-
.../av2/av2_map_conversion.py | 10 ++---
.../recording => datasets/carla}/__init__.py | 0
.../carla/carla_data_converter.py | 24 +++++------
.../carla/load_sensor.py | 2 +-
.../kitti_360/.gitkeep | 0
.../time => datasets/nuplan}/__init__.py | 0
.../nuplan/load_sensor.py | 2 +-
.../nuplan/nuplan_data_converter.py | 30 +++++++-------
.../nuplan/nuplan_map_conversion.py | 6 +--
.../nuplan/utils/log_splits.yaml | 0
.../nuscenes/.gitkeep | 0
.../raw_data_converter.py | 0
.../utils}/__init__.py | 0
.../utils/maps}/__init__.py | 0
.../utils/maps/opendrive}/__init__.py | 0
.../opendrive/opendrive_map_conversion.py | 16 ++++----
.../utils/maps/opendrive/parser}/__init__.py | 0
.../utils/maps}/opendrive/parser/elevation.py | 2 +-
.../utils/maps}/opendrive/parser/geometry.py | 0
.../utils/maps}/opendrive/parser/lane.py | 2 +-
.../utils/maps}/opendrive/parser/objects.py | 0
.../utils/maps}/opendrive/parser/opendrive.py | 2 +-
.../maps}/opendrive/parser/polynomial.py | 0
.../utils/maps}/opendrive/parser/reference.py | 8 ++--
.../utils/maps}/opendrive/parser/road.py | 8 ++--
.../utils/maps}/opendrive/utils/collection.py | 12 +++---
.../utils/maps}/opendrive/utils/id_mapping.py | 0
.../utils/maps}/opendrive/utils/id_system.py | 0
.../maps}/opendrive/utils/lane_helper.py | 8 ++--
.../maps}/opendrive/utils/objects_helper.py | 4 +-
.../utils/maps/road_edge}/__init__.py | 0
.../maps}/road_edge/road_edge_2d_utils.py | 0
.../maps}/road_edge/road_edge_3d_utils.py | 2 +-
.../waymo_map_utils/womp_boundary_utils.py | 0
.../wopd/waymo_map_utils/wopd_map_utils.py | 9 +++--
.../wopd/wopd_data_converter.py | 22 +++++-----
.../wopd/wopd_utils.py | 3 ++
d123/{common => }/datatypes/__init__.py | 0
.../detections}/__init__.py | 0
.../detections}/detection.py | 11 ++++-
.../detections}/detection_types.py | 0
.../maps/abstract_map.py | 4 +-
.../maps/abstract_map_objects.py | 2 +-
.../maps/gpkg}/__init__.py | 0
.../maps/gpkg/gpkg_map.py | 10 ++---
.../maps/gpkg/gpkg_map_objects.py | 6 +--
.../{dataset => datatypes}/maps/gpkg/utils.py | 0
.../maps/map_datatypes.py | 0
.../scene}/__init__.py | 0
.../scene/abstract_scene.py | 38 ++++--------------
.../datatypes/scene/abstract_scene_builder.py | 24 +++++++++++
.../scene/arrow}/arrow_scene.py | 25 ++++++------
.../scene/arrow/arrow_scene_builder.py} | 26 +++---------
.../scene/arrow/utils}/conversion.py | 23 +++++------
.../scene/scene_filter.py | 2 +-
d123/datatypes/scene/scene_metadata.py | 40 +++++++++++++++++++
.../sensor => datatypes/sensors}/camera.py | 0
.../sensor => datatypes/sensors}/lidar.py | 2 +-
.../sensors}/lidar_index.py | 0
.../carla => datatypes/time}/__init__.py | 0
.../{common => }/datatypes/time/time_point.py | 0
.../vehicle_state}/__init__.py | 0
.../datatypes/vehicle_state/ego_state.py | 14 +++----
.../vehicle_state/vehicle_parameters.py | 2 -
d123/geometry/__init__.py | 4 ++
d123/geometry/test/test_bounding_box.py | 3 +-
d123/geometry/test/test_point.py | 5 +--
d123/geometry/test/test_polyline.py | 4 +-
.../test/test_transform_consistency.py | 3 +-
.../test/test_transform_euler_se3.py | 3 +-
.../transform/test/test_transform_se2.py | 3 +-
.../transform/test/test_transform_se3.py | 5 +--
.../geometry/transform/transform_euler_se3.py | 4 +-
d123/geometry/transform/transform_se2.py | 4 +-
d123/geometry/transform/transform_se3.py | 4 +-
.../script/builders/data_converter_builder.py | 2 +-
d123/script/builders/scene_builder_builder.py | 2 +-
d123/script/builders/scene_filter_builder.py | 2 +-
d123/script/run_preprocessing.py | 2 +-
d123/script/run_simulation.py | 2 +-
d123/simulation/agents/abstract_agents.py | 4 +-
.../agents/constant_velocity_agents.py | 4 +-
d123/simulation/agents/idm_agents.py | 6 +--
d123/simulation/agents/path_following.py | 4 +-
d123/simulation/agents/smart_agents.py | 6 +--
.../controller/abstract_controller.py | 2 +-
.../controller/action_controller.py | 2 +-
d123/simulation/gym/demo_gym_env.py | 6 +--
.../environment/helper/environment_cache.py | 6 +--
d123/simulation/gym/gym_env.py | 4 +-
d123/simulation/history/simulation_history.py | 2 +-
.../history/simulation_history_buffer.py | 2 +-
.../metrics/sim_agents/interaction_based.py | 2 +-
.../metrics/sim_agents/map_based.py | 6 +--
.../metrics/sim_agents/sim_agents.py | 4 +-
d123/simulation/metrics/sim_agents/utils.py | 2 +-
.../observation/abstract_observation.py | 2 +-
.../observation/agents_observation.py | 4 +-
.../observation/log_replay_observation.py | 2 +-
d123/simulation/planning/abstract_planner.py | 2 +-
d123/simulation/simulation_2d.py | 2 +-
.../abstract_time_controller.py | 2 +-
.../time_controller/log_time_controller.py | 2 +-
.../feature_builder/smart_feature_builder.py | 6 +--
123 files changed, 308 insertions(+), 357 deletions(-)
delete mode 100644 d123/common/datatypes/recording/abstract_recording.py
delete mode 100644 d123/common/datatypes/recording/detection_recording.py
rename d123/{dataset/arrow/helper.py => common/utils/arrow_helper.py} (100%)
delete mode 100644 d123/dataset/arrow/__init__.py
delete mode 100644 d123/dataset/logs/__init__.py
delete mode 100644 d123/dataset/logs/log_metadata.py
delete mode 100644 d123/dataset/maps/gpkg/__init__.py
delete mode 100644 d123/dataset/scene/__init__.py
rename d123/{common/datatypes/detection => datasets}/__init__.py (100%)
rename d123/{dataset/dataset_specific => datasets}/av2/av2_constants.py (95%)
rename d123/{dataset/dataset_specific => datasets}/av2/av2_data_converter.py (96%)
rename d123/{dataset/dataset_specific => datasets}/av2/av2_helper.py (98%)
rename d123/{dataset/dataset_specific => datasets}/av2/av2_map_conversion.py (98%)
rename d123/{common/datatypes/recording => datasets/carla}/__init__.py (100%)
rename d123/{dataset/dataset_specific => datasets}/carla/carla_data_converter.py (95%)
rename d123/{dataset/dataset_specific => datasets}/carla/load_sensor.py (80%)
rename d123/{dataset/dataset_specific => datasets}/kitti_360/.gitkeep (100%)
rename d123/{common/datatypes/time => datasets/nuplan}/__init__.py (100%)
rename d123/{dataset/dataset_specific => datasets}/nuplan/load_sensor.py (88%)
rename d123/{dataset/dataset_specific => datasets}/nuplan/nuplan_data_converter.py (95%)
rename d123/{dataset/dataset_specific => datasets}/nuplan/nuplan_map_conversion.py (98%)
rename d123/{dataset/dataset_specific => datasets}/nuplan/utils/log_splits.yaml (100%)
rename d123/{dataset/dataset_specific => datasets}/nuscenes/.gitkeep (100%)
rename d123/{dataset/dataset_specific => datasets}/raw_data_converter.py (100%)
rename d123/{common/datatypes/vehicle_state => datasets/utils}/__init__.py (100%)
rename d123/{dataset => datasets/utils/maps}/__init__.py (100%)
rename d123/{dataset/conversion => datasets/utils/maps/opendrive}/__init__.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/opendrive_map_conversion.py (96%)
rename d123/{dataset/conversion/map => datasets/utils/maps/opendrive/parser}/__init__.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/elevation.py (97%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/geometry.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/lane.py (98%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/objects.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/opendrive.py (99%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/polynomial.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/reference.py (94%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/parser/road.py (93%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/collection.py (96%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/id_mapping.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/id_system.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/lane_helper.py (97%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/opendrive/utils/objects_helper.py (94%)
rename d123/{dataset/conversion/map/opendrive => datasets/utils/maps/road_edge}/__init__.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/road_edge/road_edge_2d_utils.py (100%)
rename d123/{dataset/conversion/map => datasets/utils/maps}/road_edge/road_edge_3d_utils.py (99%)
rename d123/{dataset/dataset_specific => datasets}/wopd/waymo_map_utils/womp_boundary_utils.py (100%)
rename d123/{dataset/dataset_specific => datasets}/wopd/waymo_map_utils/wopd_map_utils.py (97%)
rename d123/{dataset/dataset_specific => datasets}/wopd/wopd_data_converter.py (95%)
rename d123/{dataset/dataset_specific => datasets}/wopd/wopd_utils.py (96%)
rename d123/{common => }/datatypes/__init__.py (100%)
rename d123/{dataset/conversion/map/opendrive/parser => datatypes/detections}/__init__.py (100%)
rename d123/{common/datatypes/detection => datatypes/detections}/detection.py (94%)
rename d123/{common/datatypes/detection => datatypes/detections}/detection_types.py (100%)
rename d123/{dataset => datatypes}/maps/abstract_map.py (94%)
rename d123/{dataset => datatypes}/maps/abstract_map_objects.py (98%)
rename d123/{dataset/conversion/map/road_edge => datatypes/maps/gpkg}/__init__.py (100%)
rename d123/{dataset => datatypes}/maps/gpkg/gpkg_map.py (98%)
rename d123/{dataset => datatypes}/maps/gpkg/gpkg_map_objects.py (98%)
rename d123/{dataset => datatypes}/maps/gpkg/utils.py (100%)
rename d123/{dataset => datatypes}/maps/map_datatypes.py (100%)
rename d123/{dataset/dataset_specific => datatypes/scene}/__init__.py (100%)
rename d123/{dataset => datatypes}/scene/abstract_scene.py (66%)
create mode 100644 d123/datatypes/scene/abstract_scene_builder.py
rename d123/{dataset/scene => datatypes/scene/arrow}/arrow_scene.py (88%)
rename d123/{dataset/scene/scene_builder.py => datatypes/scene/arrow/arrow_scene_builder.py} (87%)
rename d123/{dataset/arrow => datatypes/scene/arrow/utils}/conversion.py (88%)
rename d123/{dataset => datatypes}/scene/scene_filter.py (96%)
create mode 100644 d123/datatypes/scene/scene_metadata.py
rename d123/{common/datatypes/sensor => datatypes/sensors}/camera.py (100%)
rename d123/{common/datatypes/sensor => datatypes/sensors}/lidar.py (97%)
rename d123/{common/datatypes/sensor => datatypes/sensors}/lidar_index.py (100%)
rename d123/{dataset/dataset_specific/carla => datatypes/time}/__init__.py (100%)
rename d123/{common => }/datatypes/time/time_point.py (100%)
rename d123/{dataset/dataset_specific/nuplan => datatypes/vehicle_state}/__init__.py (100%)
rename d123/{common => }/datatypes/vehicle_state/ego_state.py (96%)
rename d123/{common => }/datatypes/vehicle_state/vehicle_parameters.py (98%)
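The rename table above implies straightforward import rewrites for downstream code; for example (paths taken from the entries above):

    # Before:
    #   from d123.common.datatypes.time.time_point import TimePoint
    #   from d123.common.datatypes.detection.detection_types import DetectionType
    #   from d123.dataset.maps.map_datatypes import MapLayer
    # After:
    from d123.datatypes.time.time_point import TimePoint
    from d123.datatypes.detections.detection_types import DetectionType
    from d123.datatypes.maps.map_datatypes import MapLayer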
diff --git a/d123/common/datatypes/recording/abstract_recording.py b/d123/common/datatypes/recording/abstract_recording.py
deleted file mode 100644
index 4554d10f..00000000
--- a/d123/common/datatypes/recording/abstract_recording.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import abc
-from dataclasses import dataclass
-from enum import IntEnum
-
-
-class RecordingType(IntEnum):
- DETECTION = 0
- # SENSOR = 1 NOTE: not used yet, but reserved for future use
-
-
-@dataclass
-class Recording(abc.ABC):
- """
- Abstract observation container.
- """
-
- # @classmethod
- # @abc.abstractmethod
- # def observation_type(cls) -> ObservationType:
- # """
- # Returns detection type of the observation.
- # """
- # raise NotImplementedError
diff --git a/d123/common/datatypes/recording/detection_recording.py b/d123/common/datatypes/recording/detection_recording.py
deleted file mode 100644
index 19b68827..00000000
--- a/d123/common/datatypes/recording/detection_recording.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from dataclasses import dataclass
-
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from d123.common.datatypes.recording.abstract_recording import Recording
-
-# TODO: Reconsider if these "wrapper" datatypes are necessary.
-# Might be needed to package multiple datatypes into a single object (e.g. as planner input)
-# On the other hand, an enum based dictionary might be more flexible.
-
-
-@dataclass
-class DetectionRecording(Recording):
-
- box_detections: BoxDetectionWrapper
- traffic_light_detections: TrafficLightDetectionWrapper
diff --git a/d123/dataset/arrow/helper.py b/d123/common/utils/arrow_helper.py
similarity index 100%
rename from d123/dataset/arrow/helper.py
rename to d123/common/utils/arrow_helper.py
diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py
index adcc9752..b2c57015 100644
--- a/d123/common/utils/timer.py
+++ b/d123/common/utils/timer.py
@@ -59,21 +59,16 @@ def stats(self, verbose: bool = True) -> Optional[pd.DataFrame]:
"""
Returns a DataFrame with statistics of the logged times.
:param verbose: whether to print the timings, defaults to True
- :return: pandas dataframe.F
+ :return: pandas dataframe.
"""
statistics = {}
-
for key, timings in self._time_logs.items():
-
timings_array = np.array(timings)
timings_statistics = {}
-
for name, function in self._statistic_functions.items():
timings_statistics[name] = function(timings_array)
-
statistics[key] = timings_statistics
-
dataframe = pd.DataFrame.from_dict(statistics).transpose()
if verbose:
diff --git a/d123/common/visualization/color/default.py b/d123/common/visualization/color/default.py
index 076a1c81..9eda3f7c 100644
--- a/d123/common/visualization/color/default.py
+++ b/d123/common/visualization/color/default.py
@@ -1,7 +1,5 @@
from typing import Dict
-from d123.common.datatypes.detection.detection import TrafficLightStatus
-from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.visualization.color.color import (
BLACK,
DARKER_GREY,
@@ -13,7 +11,9 @@
Color,
)
from d123.common.visualization.color.config import PlotConfig
-from d123.dataset.maps.map_datatypes import MapLayer
+from d123.datatypes.detections.detection import TrafficLightStatus
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.maps.map_datatypes import MapLayer
HEADING_MARKER_STYLE: str = "^" # "^": triangle, "-": line
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index 27a2fff1..eb37ad74 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -22,10 +22,10 @@
get_pose_triangle,
shapely_geometry_local_coords,
)
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.maps.abstract_map_objects import AbstractLane
-from d123.dataset.maps.map_datatypes import MapLayer
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.maps.abstract_map_objects import AbstractLane
+from d123.datatypes.maps.map_datatypes import MapLayer
+from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
from d123.geometry.geometry_index import StateSE2Index
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
diff --git a/d123/common/visualization/matplotlib/plots.py b/d123/common/visualization/matplotlib/plots.py
index 80e51714..7a1010cb 100644
--- a/d123/common/visualization/matplotlib/plots.py
+++ b/d123/common/visualization/matplotlib/plots.py
@@ -11,7 +11,7 @@
add_ego_vehicle_to_ax,
add_traffic_lights_to_ax,
)
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, radius: float = 80) -> plt.Axes:
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index c9ce3601..ed5f7a17 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -15,7 +15,7 @@
get_map_meshes,
)
from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
# TODO: Try to fix performance issues.
# TODO: Refactor this file.
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 1ef2ca18..af209718 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -12,9 +12,9 @@
from d123.common.visualization.color.color import TAB_10, Color
from d123.common.visualization.color.config import PlotConfig
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG
-from d123.dataset.maps.abstract_map import MapLayer
-from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.maps.abstract_map import MapLayer
+from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
+from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 54c4eaab..9b4fd8d8 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -3,7 +3,7 @@
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG
from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
diff --git a/d123/dataset/arrow/__init__.py b/d123/dataset/arrow/__init__.py
deleted file mode 100644
index 406e0d32..00000000
--- a/d123/dataset/arrow/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-class SceneBuilder:
- pass
diff --git a/d123/dataset/logs/__init__.py b/d123/dataset/logs/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/dataset/logs/log_metadata.py b/d123/dataset/logs/log_metadata.py
deleted file mode 100644
index 818bc495..00000000
--- a/d123/dataset/logs/log_metadata.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from dataclasses import dataclass
-
-import d123
-
-# TODO: move this files and dataclass to a more appropriate place.
-
-
-@dataclass
-class LogMetadata:
-
- # TODO: add
- # - split
- # - global/local map
-
- dataset: str
- log_name: str
- location: str
- timestep_seconds: float
-
- map_has_z: bool
- version: str = str(d123.__version__)
diff --git a/d123/dataset/maps/gpkg/__init__.py b/d123/dataset/maps/gpkg/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/dataset/scene/__init__.py b/d123/dataset/scene/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/d123/common/datatypes/detection/__init__.py b/d123/datasets/__init__.py
similarity index 100%
rename from d123/common/datatypes/detection/__init__.py
rename to d123/datasets/__init__.py
diff --git a/d123/dataset/dataset_specific/av2/av2_constants.py b/d123/datasets/av2/av2_constants.py
similarity index 95%
rename from d123/dataset/dataset_specific/av2/av2_constants.py
rename to d123/datasets/av2/av2_constants.py
index 836bb12c..5d163245 100644
--- a/d123/dataset/dataset_specific/av2/av2_constants.py
+++ b/d123/datasets/av2/av2_constants.py
@@ -1,7 +1,7 @@
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraType
from d123.common.utils.enums import SerialIntEnum
-from d123.dataset.maps.map_datatypes import RoadLineType
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.maps.map_datatypes import RoadLineType
+from d123.datatypes.sensors.camera import CameraType
class AV2SensorBoxDetectionType(SerialIntEnum):
diff --git a/d123/dataset/dataset_specific/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
similarity index 96%
rename from d123/dataset/dataset_specific/av2/av2_data_converter.py
rename to d123/datasets/av2/av2_data_converter.py
index ffa45db3..11e2549b 100644
--- a/d123/dataset/dataset_specific/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -11,29 +11,29 @@
import pyarrow as pa
from pyquaternion import Quaternion
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import (
- get_av2_ford_fusion_hybrid_parameters,
- rear_axle_se3_to_center_se3,
-)
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.dataset.dataset_specific.av2.av2_constants import (
+from d123.datasets.av2.av2_constants import (
AV2_CAMERA_TYPE_MAPPING,
AV2_TO_DETECTION_TYPE,
AV2SensorBoxDetectionType,
)
-from d123.dataset.dataset_specific.av2.av2_helper import (
+from d123.datasets.av2.av2_helper import (
build_sensor_dataframe,
build_synchronization_dataframe,
find_closest_target_fpath,
get_slice_with_timestamp_ns,
)
-from d123.dataset.dataset_specific.av2.av2_map_conversion import convert_av2_map
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.logs.log_metadata import LogMetadata
+from d123.datasets.av2.av2_map_conversion import convert_av2_map
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import (
+ get_av2_ford_fusion_hybrid_parameters,
+ rear_axle_se3_to_center_se3,
+)
from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
from d123.geometry.transform.transform_euler_se3 import (
convert_relative_to_absolute_euler_se3_array,
diff --git a/d123/dataset/dataset_specific/av2/av2_helper.py b/d123/datasets/av2/av2_helper.py
similarity index 98%
rename from d123/dataset/dataset_specific/av2/av2_helper.py
rename to d123/datasets/av2/av2_helper.py
index 8ad881cc..5e130eeb 100644
--- a/d123/dataset/dataset_specific/av2/av2_helper.py
+++ b/d123/datasets/av2/av2_helper.py
@@ -3,7 +3,7 @@
import pandas as pd
-from d123.dataset.dataset_specific.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING
+from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING
AV2_SENSOR_CAM_SHUTTER_INTERVAL_MS: Final[float] = 50.0
AV2_SENSOR_LIDAR_SWEEP_INTERVAL_W_BUFFER_NS: Final[float] = 102000000.0
diff --git a/d123/dataset/dataset_specific/av2/av2_map_conversion.py b/d123/datasets/av2/av2_map_conversion.py
similarity index 98%
rename from d123/dataset/dataset_specific/av2/av2_map_conversion.py
rename to d123/datasets/av2/av2_map_conversion.py
index daadcd3b..d158007d 100644
--- a/d123/dataset/dataset_specific/av2/av2_map_conversion.py
+++ b/d123/datasets/av2/av2_map_conversion.py
@@ -10,12 +10,10 @@
import shapely
import shapely.geometry as geom
-from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
-from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import (
- get_road_edges_3d_from_generic_drivable_area_df,
-)
-from d123.dataset.dataset_specific.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType
+from d123.datasets.av2.av2_constants import AV2_ROAD_LINE_TYPE_MAPPING
+from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
+from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_generic_drivable_area_df
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType
from d123.geometry import OccupancyMap2D, Point3DIndex, Polyline2D, Polyline3D
LANE_GROUP_MARK_TYPES: List[str] = [
diff --git a/d123/common/datatypes/recording/__init__.py b/d123/datasets/carla/__init__.py
similarity index 100%
rename from d123/common/datatypes/recording/__init__.py
rename to d123/datasets/carla/__init__.py
diff --git a/d123/dataset/dataset_specific/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
similarity index 95%
rename from d123/dataset/dataset_specific/carla/carla_data_converter.py
rename to d123/datasets/carla/carla_data_converter.py
index c6ce3622..ec3a4824 100644
--- a/d123/dataset/dataset_specific/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -11,19 +11,19 @@
import numpy as np
import pyarrow as pa
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
-from d123.dataset.conversion.map.opendrive.opendrive_map_conversion import convert_from_xodr
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.maps.abstract_map import AbstractMap, MapLayer
-from d123.dataset.maps.abstract_map_objects import AbstractLane
-from d123.dataset.scene.arrow_scene import get_map_api_from_names
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datasets.utils.maps.opendrive.opendrive_map_conversion import convert_from_xodr
+from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer
+from d123.datatypes.maps.abstract_map_objects import AbstractLane
+from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar_index import CarlaLidarIndex
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex
AVAILABLE_CARLA_MAP_LOCATIONS: Final[List[str]] = [
diff --git a/d123/dataset/dataset_specific/carla/load_sensor.py b/d123/datasets/carla/load_sensor.py
similarity index 80%
rename from d123/dataset/dataset_specific/carla/load_sensor.py
rename to d123/datasets/carla/load_sensor.py
index 56ff68fa..5fcbc890 100644
--- a/d123/dataset/dataset_specific/carla/load_sensor.py
+++ b/d123/datasets/carla/load_sensor.py
@@ -2,7 +2,7 @@
import numpy as np
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
def load_carla_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
diff --git a/d123/dataset/dataset_specific/kitti_360/.gitkeep b/d123/datasets/kitti_360/.gitkeep
similarity index 100%
rename from d123/dataset/dataset_specific/kitti_360/.gitkeep
rename to d123/datasets/kitti_360/.gitkeep
diff --git a/d123/common/datatypes/time/__init__.py b/d123/datasets/nuplan/__init__.py
similarity index 100%
rename from d123/common/datatypes/time/__init__.py
rename to d123/datasets/nuplan/__init__.py
diff --git a/d123/dataset/dataset_specific/nuplan/load_sensor.py b/d123/datasets/nuplan/load_sensor.py
similarity index 88%
rename from d123/dataset/dataset_specific/nuplan/load_sensor.py
rename to d123/datasets/nuplan/load_sensor.py
index f7dd16e8..c00e4f31 100644
--- a/d123/dataset/dataset_specific/nuplan/load_sensor.py
+++ b/d123/datasets/nuplan/load_sensor.py
@@ -1,8 +1,8 @@
import io
from pathlib import Path
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
from d123.common.utils.dependencies import check_dependencies
+from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
check_dependencies(["nuplan"], "nuplan")
from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
similarity index 95%
rename from d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
rename to d123/datasets/nuplan/nuplan_data_converter.py
index 64ba1c72..a7386917 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -12,24 +12,24 @@
import yaml
from pyquaternion import Quaternion
-import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils
-from d123.common.datatypes.detection.detection import TrafficLightStatus
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import (
+import d123.datasets.nuplan.utils as nuplan_utils
+from d123.common.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
+from d123.common.utils.dependencies import check_dependencies
+from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datatypes.detections.detection import TrafficLightStatus
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar_index import NuplanLidarIndex
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import (
get_nuplan_chrysler_pacifica_parameters,
rear_axle_se3_to_center_se3,
)
-from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.common.utils.dependencies import check_dependencies
-from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
-from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.logs.log_metadata import LogMetadata
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
diff --git a/d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py b/d123/datasets/nuplan/nuplan_map_conversion.py
similarity index 98%
rename from d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py
rename to d123/datasets/nuplan/nuplan_map_conversion.py
index 78f5d3f6..7b444e5c 100644
--- a/d123/dataset/dataset_specific/nuplan/nuplan_map_conversion.py
+++ b/d123/datasets/nuplan/nuplan_map_conversion.py
@@ -11,12 +11,12 @@
import pyogrio
from shapely.geometry import LineString
-from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import (
+from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import (
get_road_edge_linear_rings,
split_line_geometry_by_max_length,
)
-from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
MAP_FILES = {
"sg-one-north": "sg-one-north/9.17.1964/map.gpkg",
diff --git a/d123/dataset/dataset_specific/nuplan/utils/log_splits.yaml b/d123/datasets/nuplan/utils/log_splits.yaml
similarity index 100%
rename from d123/dataset/dataset_specific/nuplan/utils/log_splits.yaml
rename to d123/datasets/nuplan/utils/log_splits.yaml
diff --git a/d123/dataset/dataset_specific/nuscenes/.gitkeep b/d123/datasets/nuscenes/.gitkeep
similarity index 100%
rename from d123/dataset/dataset_specific/nuscenes/.gitkeep
rename to d123/datasets/nuscenes/.gitkeep
diff --git a/d123/dataset/dataset_specific/raw_data_converter.py b/d123/datasets/raw_data_converter.py
similarity index 100%
rename from d123/dataset/dataset_specific/raw_data_converter.py
rename to d123/datasets/raw_data_converter.py
diff --git a/d123/common/datatypes/vehicle_state/__init__.py b/d123/datasets/utils/__init__.py
similarity index 100%
rename from d123/common/datatypes/vehicle_state/__init__.py
rename to d123/datasets/utils/__init__.py
diff --git a/d123/dataset/__init__.py b/d123/datasets/utils/maps/__init__.py
similarity index 100%
rename from d123/dataset/__init__.py
rename to d123/datasets/utils/maps/__init__.py
diff --git a/d123/dataset/conversion/__init__.py b/d123/datasets/utils/maps/opendrive/__init__.py
similarity index 100%
rename from d123/dataset/conversion/__init__.py
rename to d123/datasets/utils/maps/opendrive/__init__.py
diff --git a/d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py b/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py
similarity index 96%
rename from d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py
rename to d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py
index 56dfa65c..c011a07f 100644
--- a/d123/dataset/conversion/map/opendrive/opendrive_map_conversion.py
+++ b/d123/datasets/utils/maps/opendrive/opendrive_map_conversion.py
@@ -10,19 +10,19 @@
import shapely
from shapely.ops import polygonize, unary_union
-from d123.dataset.conversion.map.opendrive.parser.opendrive import Junction, OpenDrive
-from d123.dataset.conversion.map.opendrive.utils.collection import collect_element_helpers
-from d123.dataset.conversion.map.opendrive.utils.id_mapping import IntIDMapping
-from d123.dataset.conversion.map.opendrive.utils.lane_helper import (
+from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive
+from d123.datasets.utils.maps.opendrive.utils.collection import collect_element_helpers
+from d123.datasets.utils.maps.opendrive.utils.id_mapping import IntIDMapping
+from d123.datasets.utils.maps.opendrive.utils.lane_helper import (
OpenDriveLaneGroupHelper,
OpenDriveLaneHelper,
)
-from d123.dataset.conversion.map.opendrive.utils.objects_helper import (
+from d123.datasets.utils.maps.opendrive.utils.objects_helper import (
OpenDriveObjectHelper,
)
-from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
-from d123.dataset.conversion.map.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import split_line_geometry_by_max_length
+from d123.datasets.utils.maps.road_edge.road_edge_3d_utils import get_road_edges_3d_from_gdf
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
logger = logging.getLogger(__name__)
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
diff --git a/d123/dataset/conversion/map/__init__.py b/d123/datasets/utils/maps/opendrive/parser/__init__.py
similarity index 100%
rename from d123/dataset/conversion/map/__init__.py
rename to d123/datasets/utils/maps/opendrive/parser/__init__.py
diff --git a/d123/dataset/conversion/map/opendrive/parser/elevation.py b/d123/datasets/utils/maps/opendrive/parser/elevation.py
similarity index 97%
rename from d123/dataset/conversion/map/opendrive/parser/elevation.py
rename to d123/datasets/utils/maps/opendrive/parser/elevation.py
index 7ee7aa67..a529bc0b 100644
--- a/d123/dataset/conversion/map/opendrive/parser/elevation.py
+++ b/d123/datasets/utils/maps/opendrive/parser/elevation.py
@@ -4,7 +4,7 @@
from typing import List, Optional
from xml.etree.ElementTree import Element
-from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
+from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/geometry.py b/d123/datasets/utils/maps/opendrive/parser/geometry.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/parser/geometry.py
rename to d123/datasets/utils/maps/opendrive/parser/geometry.py
diff --git a/d123/dataset/conversion/map/opendrive/parser/lane.py b/d123/datasets/utils/maps/opendrive/parser/lane.py
similarity index 98%
rename from d123/dataset/conversion/map/opendrive/parser/lane.py
rename to d123/datasets/utils/maps/opendrive/parser/lane.py
index ccbfc9ff..e27de490 100644
--- a/d123/dataset/conversion/map/opendrive/parser/lane.py
+++ b/d123/datasets/utils/maps/opendrive/parser/lane.py
@@ -4,7 +4,7 @@
from typing import List, Optional
from xml.etree.ElementTree import Element
-from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
+from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/objects.py b/d123/datasets/utils/maps/opendrive/parser/objects.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/parser/objects.py
rename to d123/datasets/utils/maps/opendrive/parser/objects.py
diff --git a/d123/dataset/conversion/map/opendrive/parser/opendrive.py b/d123/datasets/utils/maps/opendrive/parser/opendrive.py
similarity index 99%
rename from d123/dataset/conversion/map/opendrive/parser/opendrive.py
rename to d123/datasets/utils/maps/opendrive/parser/opendrive.py
index 2a07bb5f..4ed7bb33 100644
--- a/d123/dataset/conversion/map/opendrive/parser/opendrive.py
+++ b/d123/datasets/utils/maps/opendrive/parser/opendrive.py
@@ -6,7 +6,7 @@
from typing import List, Literal, Optional
from xml.etree.ElementTree import Element, parse
-from d123.dataset.conversion.map.opendrive.parser.road import Road
+from d123.datasets.utils.maps.opendrive.parser.road import Road
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/parser/polynomial.py b/d123/datasets/utils/maps/opendrive/parser/polynomial.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/parser/polynomial.py
rename to d123/datasets/utils/maps/opendrive/parser/polynomial.py
diff --git a/d123/dataset/conversion/map/opendrive/parser/reference.py b/d123/datasets/utils/maps/opendrive/parser/reference.py
similarity index 94%
rename from d123/dataset/conversion/map/opendrive/parser/reference.py
rename to d123/datasets/utils/maps/opendrive/parser/reference.py
index bc13537d..ed19a98d 100644
--- a/d123/dataset/conversion/map/opendrive/parser/reference.py
+++ b/d123/datasets/utils/maps/opendrive/parser/reference.py
@@ -9,10 +9,10 @@
import numpy as np
import numpy.typing as npt
-from d123.dataset.conversion.map.opendrive.parser.elevation import Elevation
-from d123.dataset.conversion.map.opendrive.parser.geometry import Arc, Geometry, Line, Spiral
-from d123.dataset.conversion.map.opendrive.parser.lane import LaneOffset, Width
-from d123.dataset.conversion.map.opendrive.parser.polynomial import Polynomial
+from d123.datasets.utils.maps.opendrive.parser.elevation import Elevation
+from d123.datasets.utils.maps.opendrive.parser.geometry import Arc, Geometry, Line, Spiral
+from d123.datasets.utils.maps.opendrive.parser.lane import LaneOffset, Width
+from d123.datasets.utils.maps.opendrive.parser.polynomial import Polynomial
from d123.geometry import Point3DIndex, StateSE2Index
TOLERANCE: Final[float] = 1e-3
diff --git a/d123/dataset/conversion/map/opendrive/parser/road.py b/d123/datasets/utils/maps/opendrive/parser/road.py
similarity index 93%
rename from d123/dataset/conversion/map/opendrive/parser/road.py
rename to d123/datasets/utils/maps/opendrive/parser/road.py
index e0171429..28b5b679 100644
--- a/d123/dataset/conversion/map/opendrive/parser/road.py
+++ b/d123/datasets/utils/maps/opendrive/parser/road.py
@@ -4,10 +4,10 @@
from typing import List, Optional
from xml.etree.ElementTree import Element
-from d123.dataset.conversion.map.opendrive.parser.elevation import ElevationProfile, LateralProfile
-from d123.dataset.conversion.map.opendrive.parser.lane import Lanes
-from d123.dataset.conversion.map.opendrive.parser.objects import Object
-from d123.dataset.conversion.map.opendrive.parser.reference import PlanView
+from d123.datasets.utils.maps.opendrive.parser.elevation import ElevationProfile, LateralProfile
+from d123.datasets.utils.maps.opendrive.parser.lane import Lanes
+from d123.datasets.utils.maps.opendrive.parser.objects import Object
+from d123.datasets.utils.maps.opendrive.parser.reference import PlanView
@dataclass
diff --git a/d123/dataset/conversion/map/opendrive/utils/collection.py b/d123/datasets/utils/maps/opendrive/utils/collection.py
similarity index 96%
rename from d123/dataset/conversion/map/opendrive/utils/collection.py
rename to d123/datasets/utils/maps/opendrive/utils/collection.py
index be631445..bf28c997 100644
--- a/d123/dataset/conversion/map/opendrive/utils/collection.py
+++ b/d123/datasets/utils/maps/opendrive/utils/collection.py
@@ -3,21 +3,21 @@
import numpy as np
-from d123.dataset.conversion.map.opendrive.parser.opendrive import Junction, OpenDrive
-from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
-from d123.dataset.conversion.map.opendrive.parser.road import Road
-from d123.dataset.conversion.map.opendrive.utils.id_system import (
+from d123.datasets.utils.maps.opendrive.parser.opendrive import Junction, OpenDrive
+from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine
+from d123.datasets.utils.maps.opendrive.parser.road import Road
+from d123.datasets.utils.maps.opendrive.utils.id_system import (
build_lane_id,
derive_lane_section_id,
lane_group_id_from_lane_id,
road_id_from_lane_group_id,
)
-from d123.dataset.conversion.map.opendrive.utils.lane_helper import (
+from d123.datasets.utils.maps.opendrive.utils.lane_helper import (
OpenDriveLaneGroupHelper,
OpenDriveLaneHelper,
lane_section_to_lane_helpers,
)
-from d123.dataset.conversion.map.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper
+from d123.datasets.utils.maps.opendrive.utils.objects_helper import OpenDriveObjectHelper, get_object_helper
logger = logging.getLogger(__name__)
diff --git a/d123/dataset/conversion/map/opendrive/utils/id_mapping.py b/d123/datasets/utils/maps/opendrive/utils/id_mapping.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/utils/id_mapping.py
rename to d123/datasets/utils/maps/opendrive/utils/id_mapping.py
diff --git a/d123/dataset/conversion/map/opendrive/utils/id_system.py b/d123/datasets/utils/maps/opendrive/utils/id_system.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/utils/id_system.py
rename to d123/datasets/utils/maps/opendrive/utils/id_system.py
diff --git a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py b/d123/datasets/utils/maps/opendrive/utils/lane_helper.py
similarity index 97%
rename from d123/dataset/conversion/map/opendrive/utils/lane_helper.py
rename to d123/datasets/utils/maps/opendrive/utils/lane_helper.py
index a21bd625..34d57054 100644
--- a/d123/dataset/conversion/map/opendrive/utils/lane_helper.py
+++ b/d123/datasets/utils/maps/opendrive/utils/lane_helper.py
@@ -6,10 +6,10 @@
import numpy.typing as npt
import shapely
-from d123.dataset.conversion.map.opendrive.parser.lane import Lane, LaneSection
-from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
-from d123.dataset.conversion.map.opendrive.parser.road import RoadType
-from d123.dataset.conversion.map.opendrive.utils.id_system import (
+from d123.datasets.utils.maps.opendrive.parser.lane import Lane, LaneSection
+from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine
+from d123.datasets.utils.maps.opendrive.parser.road import RoadType
+from d123.datasets.utils.maps.opendrive.utils.id_system import (
derive_lane_group_id,
derive_lane_id,
lane_group_id_from_lane_id,
diff --git a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py b/d123/datasets/utils/maps/opendrive/utils/objects_helper.py
similarity index 94%
rename from d123/dataset/conversion/map/opendrive/utils/objects_helper.py
rename to d123/datasets/utils/maps/opendrive/utils/objects_helper.py
index 11c7f609..ff478149 100644
--- a/d123/dataset/conversion/map/opendrive/utils/objects_helper.py
+++ b/d123/datasets/utils/maps/opendrive/utils/objects_helper.py
@@ -5,8 +5,8 @@
import numpy.typing as npt
import shapely
-from d123.dataset.conversion.map.opendrive.parser.objects import Object
-from d123.dataset.conversion.map.opendrive.parser.reference import ReferenceLine
+from d123.datasets.utils.maps.opendrive.parser.objects import Object
+from d123.datasets.utils.maps.opendrive.parser.reference import ReferenceLine
from d123.geometry import Point2D, Point3D, Point3DIndex, StateSE2
from d123.geometry.transform.tranform_2d import translate_along_yaw
from d123.geometry.utils.rotation_utils import normalize_angle
diff --git a/d123/dataset/conversion/map/opendrive/__init__.py b/d123/datasets/utils/maps/road_edge/__init__.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/__init__.py
rename to d123/datasets/utils/maps/road_edge/__init__.py
diff --git a/d123/dataset/conversion/map/road_edge/road_edge_2d_utils.py b/d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py
similarity index 100%
rename from d123/dataset/conversion/map/road_edge/road_edge_2d_utils.py
rename to d123/datasets/utils/maps/road_edge/road_edge_2d_utils.py
diff --git a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py b/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py
similarity index 99%
rename from d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
rename to d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py
index b88a44e0..ceeee62a 100644
--- a/d123/dataset/conversion/map/road_edge/road_edge_3d_utils.py
+++ b/d123/datasets/utils/maps/road_edge/road_edge_3d_utils.py
@@ -9,7 +9,7 @@
import shapely
from shapely.geometry import LineString
-from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
+from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import get_road_edge_linear_rings
from d123.geometry import Point3DIndex
from d123.geometry.occupancy_map import OccupancyMap2D
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py b/d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py
similarity index 100%
rename from d123/dataset/dataset_specific/wopd/waymo_map_utils/womp_boundary_utils.py
rename to d123/datasets/wopd/waymo_map_utils/womp_boundary_utils.py
diff --git a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py b/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py
similarity index 97%
rename from d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
rename to d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py
index 6cf4f0f9..561d83f0 100644
--- a/d123/dataset/dataset_specific/wopd/waymo_map_utils/wopd_map_utils.py
+++ b/d123/datasets/wopd/waymo_map_utils/wopd_map_utils.py
@@ -7,13 +7,16 @@
import numpy.typing as npt
import pandas as pd
import shapely.geometry as geom
-from waymo_open_dataset import dataset_pb2
-from d123.dataset.dataset_specific.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.common.utils.dependencies import check_dependencies
+from d123.datasets.wopd.waymo_map_utils.womp_boundary_utils import extract_lane_boundaries
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
from d123.geometry import Point3DIndex, Polyline3D
from d123.geometry.utils.units import mph_to_mps
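+# Guard the deferred waymo import below: check_dependencies is expected to
+# raise a descriptive error when the optional "waymo" extra is not installed.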
+check_dependencies(modules=["waymo_open_dataset"], optional_name="waymo")
+from waymo_open_dataset import dataset_pb2
+
# TODO:
# - Implement stop signs
# - Implement speed bumps
diff --git a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
similarity index 95%
rename from d123/dataset/dataset_specific/wopd/wopd_data_converter.py
rename to d123/datasets/wopd/wopd_data_converter.py
index d74a7ea8..1e935329 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -12,19 +12,19 @@
import pyarrow as pa
from pyquaternion import Quaternion
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import WopdLidarIndex
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
from d123.common.utils.dependencies import check_dependencies
-from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.dataset_specific.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
-from d123.dataset.dataset_specific.wopd.wopd_utils import parse_range_image_and_camera_projection
-from d123.dataset.logs.log_metadata import LogMetadata
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
+from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar_index import WopdLidarIndex
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
diff --git a/d123/dataset/dataset_specific/wopd/wopd_utils.py b/d123/datasets/wopd/wopd_utils.py
similarity index 96%
rename from d123/dataset/dataset_specific/wopd/wopd_utils.py
rename to d123/datasets/wopd/wopd_utils.py
index c9c302ef..e58813f9 100644
--- a/d123/dataset/dataset_specific/wopd/wopd_utils.py
+++ b/d123/datasets/wopd/wopd_utils.py
@@ -1,5 +1,8 @@
from typing import Dict, List, Tuple
+from d123.common.utils.dependencies import check_dependencies
+
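+# Same guard as in wopd_map_utils.py: verify the optional "waymo" extra
+# (tensorflow, waymo_open_dataset) before the heavyweight imports below.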
+check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
diff --git a/d123/common/datatypes/__init__.py b/d123/datatypes/__init__.py
similarity index 100%
rename from d123/common/datatypes/__init__.py
rename to d123/datatypes/__init__.py
diff --git a/d123/dataset/conversion/map/opendrive/parser/__init__.py b/d123/datatypes/detections/__init__.py
similarity index 100%
rename from d123/dataset/conversion/map/opendrive/parser/__init__.py
rename to d123/datatypes/detections/__init__.py
diff --git a/d123/common/datatypes/detection/detection.py b/d123/datatypes/detections/detection.py
similarity index 94%
rename from d123/common/datatypes/detection/detection.py
rename to d123/datatypes/detections/detection.py
index 8ada50fe..bf855679 100644
--- a/d123/common/datatypes/detection/detection.py
+++ b/d123/datatypes/detections/detection.py
@@ -4,9 +4,9 @@
import shapely
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.time.time_point import TimePoint
from d123.common.utils.enums import SerialIntEnum
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.time.time_point import TimePoint
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D
@@ -150,3 +150,10 @@ def get_detection_by_lane_id(self, lane_id: int) -> TrafficLightDetection | None
traffic_light_detection = detection
break
return traffic_light_detection
+
+
+@dataclass
+class DetectionRecording:
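+    """
+    Pairs per-frame box detections with traffic light detections.
+    Moved here from the removed d123/common/datatypes/recording module.
+    """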
+
+ box_detections: BoxDetectionWrapper
+ traffic_light_detections: TrafficLightDetectionWrapper
diff --git a/d123/common/datatypes/detection/detection_types.py b/d123/datatypes/detections/detection_types.py
similarity index 100%
rename from d123/common/datatypes/detection/detection_types.py
rename to d123/datatypes/detections/detection_types.py
diff --git a/d123/dataset/maps/abstract_map.py b/d123/datatypes/maps/abstract_map.py
similarity index 94%
rename from d123/dataset/maps/abstract_map.py
rename to d123/datatypes/maps/abstract_map.py
index be9eefeb..edfc16e5 100644
--- a/d123/dataset/maps/abstract_map.py
+++ b/d123/datatypes/maps/abstract_map.py
@@ -5,8 +5,8 @@
import shapely
-from d123.dataset.maps.abstract_map_objects import AbstractMapObject
-from d123.dataset.maps.map_datatypes import MapLayer
+from d123.datatypes.maps.abstract_map_objects import AbstractMapObject
+from d123.datatypes.maps.map_datatypes import MapLayer
from d123.geometry import Point2D
# TODO:
diff --git a/d123/dataset/maps/abstract_map_objects.py b/d123/datatypes/maps/abstract_map_objects.py
similarity index 98%
rename from d123/dataset/maps/abstract_map_objects.py
rename to d123/datatypes/maps/abstract_map_objects.py
index 14e85539..67f63e5c 100644
--- a/d123/dataset/maps/abstract_map_objects.py
+++ b/d123/datatypes/maps/abstract_map_objects.py
@@ -6,7 +6,7 @@
import shapely.geometry as geom
import trimesh
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
from d123.geometry import Polyline2D, Polyline3D, PolylineSE2
diff --git a/d123/dataset/conversion/map/road_edge/__init__.py b/d123/datatypes/maps/gpkg/__init__.py
similarity index 100%
rename from d123/dataset/conversion/map/road_edge/__init__.py
rename to d123/datatypes/maps/gpkg/__init__.py
diff --git a/d123/dataset/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py
similarity index 98%
rename from d123/dataset/maps/gpkg/gpkg_map.py
rename to d123/datatypes/maps/gpkg/gpkg_map.py
index d466857e..429a611e 100644
--- a/d123/dataset/maps/gpkg/gpkg_map.py
+++ b/d123/datatypes/maps/gpkg/gpkg_map.py
@@ -11,9 +11,9 @@
import shapely
import shapely.geometry as geom
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.maps.abstract_map_objects import AbstractMapObject
-from d123.dataset.maps.gpkg.gpkg_map_objects import (
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.maps.abstract_map_objects import AbstractMapObject
+from d123.datatypes.maps.gpkg.gpkg_map_objects import (
GPKGCarpark,
GPKGCrosswalk,
GPKGGenericDrivable,
@@ -24,8 +24,8 @@
GPKGRoadLine,
GPKGWalkway,
)
-from d123.dataset.maps.gpkg.utils import load_gdf_with_geometry_columns
-from d123.dataset.maps.map_datatypes import MapLayer
+from d123.datatypes.maps.gpkg.utils import load_gdf_with_geometry_columns
+from d123.datatypes.maps.map_datatypes import MapLayer
from d123.geometry import Point2D
USE_ARROW: bool = True
diff --git a/d123/dataset/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py
similarity index 98%
rename from d123/dataset/maps/gpkg/gpkg_map_objects.py
rename to d123/datatypes/maps/gpkg/gpkg_map_objects.py
index 1a274a53..2326490b 100644
--- a/d123/dataset/maps/gpkg/gpkg_map_objects.py
+++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py
@@ -11,7 +11,7 @@
import trimesh
from d123.common.visualization.viser.utils import get_trimesh_from_boundaries
-from d123.dataset.maps.abstract_map_objects import (
+from d123.datatypes.maps.abstract_map_objects import (
AbstractCarpark,
AbstractCrosswalk,
AbstractGenericDrivable,
@@ -24,8 +24,8 @@
AbstractSurfaceMapObject,
AbstractWalkway,
)
-from d123.dataset.maps.gpkg.utils import get_row_with_value
-from d123.dataset.maps.map_datatypes import RoadEdgeType, RoadLineType
+from d123.datatypes.maps.gpkg.utils import get_row_with_value
+from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType
from d123.geometry import Point3DIndex, Polyline3D
diff --git a/d123/dataset/maps/gpkg/utils.py b/d123/datatypes/maps/gpkg/utils.py
similarity index 100%
rename from d123/dataset/maps/gpkg/utils.py
rename to d123/datatypes/maps/gpkg/utils.py
diff --git a/d123/dataset/maps/map_datatypes.py b/d123/datatypes/maps/map_datatypes.py
similarity index 100%
rename from d123/dataset/maps/map_datatypes.py
rename to d123/datatypes/maps/map_datatypes.py
diff --git a/d123/dataset/dataset_specific/__init__.py b/d123/datatypes/scene/__init__.py
similarity index 100%
rename from d123/dataset/dataset_specific/__init__.py
rename to d123/datatypes/scene/__init__.py
diff --git a/d123/dataset/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py
similarity index 66%
rename from d123/dataset/scene/abstract_scene.py
rename to d123/datatypes/scene/abstract_scene.py
index 4877fc83..a3d2ccfe 100644
--- a/d123/dataset/scene/abstract_scene.py
+++ b/d123/datatypes/scene/abstract_scene.py
@@ -1,17 +1,15 @@
from __future__ import annotations
import abc
-from dataclasses import dataclass
from typing import List, Optional
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.common.datatypes.sensor.camera import Camera, CameraType
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARType
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.maps.abstract_map import AbstractMap
+from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import Camera, CameraType
+from d123.datatypes.sensors.lidar import LiDAR, LiDARType
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
# TODO: Remove or improve open/close dynamic of Scene object.
@@ -93,25 +91,3 @@ def open(self) -> None:
def close(self) -> None:
pass
-
-
-# TODO: Move to a more appropriate place.
-@dataclass(frozen=True)
-class SceneExtractionInfo:
-
- initial_idx: int
- duration_s: float
- history_s: float
- iteration_duration_s: float
-
- @property
- def number_of_iterations(self) -> int:
- return round(self.duration_s / self.iteration_duration_s)
-
- @property
- def number_of_history_iterations(self) -> int:
- return round(self.history_s / self.iteration_duration_s)
-
- @property
- def end_idx(self) -> int:
- return self.initial_idx + self.number_of_iterations
diff --git a/d123/datatypes/scene/abstract_scene_builder.py b/d123/datatypes/scene/abstract_scene_builder.py
new file mode 100644
index 00000000..56eb9a6b
--- /dev/null
+++ b/d123/datatypes/scene/abstract_scene_builder.py
@@ -0,0 +1,24 @@
+import abc
+from typing import Iterator
+
+from d123.common.multithreading.worker_utils import WorkerPool
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.scene_filter import SceneFilter
+
+# TODO: Expand lazy implementation for scene builder.
+
+
+class SceneBuilder(abc.ABC):
+ """
+ Abstract base class for building scenes from a dataset.
+ """
+
+ @abc.abstractmethod
+ def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]:
+ """
+ Returns an iterator over scenes that match the given filter.
+ :param filter: SceneFilter object to filter the scenes.
+ :param worker: WorkerPool to parallelize the scene extraction.
+ :return: Iterator over AbstractScene objects.
+ """
+ raise NotImplementedError
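+
+
+# Usage sketch (illustrative only; constructor arguments omitted). ArrowSceneBuilder
+# in d123.datatypes.scene.arrow.arrow_scene_builder implements this interface:
+#   builder: SceneBuilder = ArrowSceneBuilder(...)
+#   for scene in builder.get_scenes(scene_filter, worker):
+#       ...  # consume AbstractScene objects as they are yielded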
diff --git a/d123/dataset/scene/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
similarity index 88%
rename from d123/dataset/scene/arrow_scene.py
rename to d123/datatypes/scene/arrow/arrow_scene.py
index ecd68111..0fc61ba8 100644
--- a/d123/dataset/scene/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -4,14 +4,12 @@
import pyarrow as pa
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.dataset.arrow.conversion import (
+from d123.common.utils.arrow_helper import open_arrow_table
+from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.arrow.utils.conversion import (
get_box_detections_from_arrow_table,
get_camera_from_arrow_table,
get_ego_vehicle_state_from_arrow_table,
@@ -19,11 +17,12 @@
get_timepoint_from_arrow_table,
get_traffic_light_detections_from_arrow_table,
)
-from d123.dataset.arrow.helper import open_arrow_table
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names
-from d123.dataset.scene.abstract_scene import AbstractScene, SceneExtractionInfo
+from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo
+from d123.datatypes.sensors.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json
+from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
# TODO: Remove or improve open/close dynamic of Scene object.
diff --git a/d123/dataset/scene/scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py
similarity index 87%
rename from d123/dataset/scene/scene_builder.py
rename to d123/datatypes/scene/arrow/arrow_scene_builder.py
index 228fcb13..94571715 100644
--- a/d123/dataset/scene/scene_builder.py
+++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py
@@ -1,4 +1,3 @@
-import abc
import json
import random
from functools import partial
@@ -6,25 +5,12 @@
from typing import Iterator, List, Optional, Set, Union
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.dataset.arrow.helper import open_arrow_table
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.scene.abstract_scene import AbstractScene
-from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo
-from d123.dataset.scene.scene_filter import SceneFilter
-
-# TODO: Fix lazy abstraction implementation for scene builder.
-
-
-class SceneBuilder(abc.ABC):
- @abc.abstractmethod
- def get_scenes(self, filter: SceneFilter, worker: WorkerPool) -> Iterator[AbstractScene]:
- """
- Returns an iterator over scenes that match the given filter.
- :param filter: SceneFilter object to filter the scenes.
- :param worker: WorkerPool to parallelize the scene extraction.
- :return: Iterator over AbstractScene objects.
- """
- raise NotImplementedError
+from d123.common.utils.arrow_helper import open_arrow_table
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene_builder import SceneBuilder
+from d123.datatypes.scene.arrow.arrow_scene import ArrowScene
+from d123.datatypes.scene.scene_filter import SceneFilter
+from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo
class ArrowSceneBuilder(SceneBuilder):
diff --git a/d123/dataset/arrow/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py
similarity index 88%
rename from d123/dataset/arrow/conversion.py
rename to d123/datatypes/scene/arrow/utils/conversion.py
index c961ab7d..514d9f20 100644
--- a/d123/dataset/arrow/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/conversion.py
@@ -3,14 +3,14 @@
import io
import os
from pathlib import Path
-from typing import Dict, Optional
+from typing import Dict, List, Optional
import numpy as np
import numpy.typing as npt
import pyarrow as pa
from PIL import Image
-from d123.common.datatypes.detection.detection import (
+from d123.datatypes.detections.detection import (
BoxDetection,
BoxDetectionMetadata,
BoxDetectionSE3,
@@ -19,14 +19,13 @@
TrafficLightDetectionWrapper,
TrafficLightStatus,
)
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import Camera, CameraMetadata
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.maps.abstract_map import List
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.sensors.camera import Camera, CameraMetadata
+from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
from d123.geometry import BoundingBoxSE3, Vector3D
DATASET_SENSOR_ROOT: Dict[str, Path] = {
@@ -144,11 +143,11 @@ def get_lidar_from_arrow_table(
# NOTE: We move dataset-specific imports into the if-else block to avoid dataset-specific import errors
if log_metadata.dataset == "nuplan":
- from d123.dataset.dataset_specific.nuplan.load_sensor import load_nuplan_lidar_from_path
+ from d123.datasets.nuplan.load_sensor import load_nuplan_lidar_from_path
lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata)
elif log_metadata.dataset == "carla":
- from d123.dataset.dataset_specific.carla.load_sensor import load_carla_lidar_from_path
+ from d123.datasets.carla.load_sensor import load_carla_lidar_from_path
lidar = load_carla_lidar_from_path(full_lidar_path, lidar_metadata)
elif log_metadata.dataset == "wopd":
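The NOTE above motivates the lazy imports; a minimal standalone sketch of the same pattern, assuming the post-rename module paths from this patch (the load_lidar wrapper and its error message are illustrative, not part of the codebase):

def load_lidar(dataset: str, path, metadata):
    # Import dataset-specific loaders lazily so that a missing optional
    # dependency for one dataset does not break loading for the others.
    if dataset == "nuplan":
        from d123.datasets.nuplan.load_sensor import load_nuplan_lidar_from_path
        return load_nuplan_lidar_from_path(path, metadata)
    if dataset == "carla":
        from d123.datasets.carla.load_sensor import load_carla_lidar_from_path
        return load_carla_lidar_from_path(path, metadata)
    raise ValueError(f"Unsupported dataset: {dataset}")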
diff --git a/d123/dataset/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py
similarity index 96%
rename from d123/dataset/scene/scene_filter.py
rename to d123/datatypes/scene/scene_filter.py
index 0a58eba6..5dbb5ba6 100644
--- a/d123/dataset/scene/scene_filter.py
+++ b/d123/datatypes/scene/scene_filter.py
@@ -1,7 +1,7 @@
from dataclasses import dataclass
from typing import List, Optional
-from d123.common.datatypes.sensor.camera import CameraType
+from d123.datatypes.sensors.camera import CameraType
# TODO: Add more filter options (e.g. scene tags, ego movement, or whatever is appropriate)
diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py
new file mode 100644
index 00000000..0fc12b03
--- /dev/null
+++ b/d123/datatypes/scene/scene_metadata.py
@@ -0,0 +1,40 @@
+from dataclasses import dataclass
+
+import d123
+
+
+@dataclass
+class LogMetadata:
+
+ # TODO: add
+ # - split
+ # - global/local map
+
+ dataset: str
+ log_name: str
+ location: str
+ timestep_seconds: float
+
+ map_has_z: bool
+ version: str = str(d123.__version__)
+
+
+@dataclass(frozen=True)
+class SceneExtractionInfo:
+
+ initial_idx: int
+ duration_s: float
+ history_s: float
+ iteration_duration_s: float
+
+ @property
+ def number_of_iterations(self) -> int:
+ return round(self.duration_s / self.iteration_duration_s)
+
+ @property
+ def number_of_history_iterations(self) -> int:
+ return round(self.history_s / self.iteration_duration_s)
+
+ @property
+ def end_idx(self) -> int:
+ return self.initial_idx + self.number_of_iterations
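A quick usage sketch for the dataclass above; the values are illustrative and the import path assumes the layout introduced by this patch:

from d123.datatypes.scene.scene_metadata import SceneExtractionInfo

info = SceneExtractionInfo(
    initial_idx=100,            # first frame of the extracted scene
    duration_s=8.0,             # total scene duration
    history_s=2.0,              # warm-up history before the initial frame
    iteration_duration_s=0.1,   # 10 Hz stepping
)
assert info.number_of_iterations == 80          # round(8.0 / 0.1)
assert info.number_of_history_iterations == 20  # round(2.0 / 0.1)
assert info.end_idx == 180                      # 100 + 80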
diff --git a/d123/common/datatypes/sensor/camera.py b/d123/datatypes/sensors/camera.py
similarity index 100%
rename from d123/common/datatypes/sensor/camera.py
rename to d123/datatypes/sensors/camera.py
diff --git a/d123/common/datatypes/sensor/lidar.py b/d123/datatypes/sensors/lidar.py
similarity index 97%
rename from d123/common/datatypes/sensor/lidar.py
rename to d123/datatypes/sensors/lidar.py
index fe178760..f15ccc89 100644
--- a/d123/common/datatypes/sensor/lidar.py
+++ b/d123/datatypes/sensors/lidar.py
@@ -7,8 +7,8 @@
import numpy as np
import numpy.typing as npt
-from d123.common.datatypes.sensor.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex
from d123.common.utils.enums import SerialIntEnum
+from d123.datatypes.sensors.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex
class LiDARType(SerialIntEnum):
diff --git a/d123/common/datatypes/sensor/lidar_index.py b/d123/datatypes/sensors/lidar_index.py
similarity index 100%
rename from d123/common/datatypes/sensor/lidar_index.py
rename to d123/datatypes/sensors/lidar_index.py
diff --git a/d123/dataset/dataset_specific/carla/__init__.py b/d123/datatypes/time/__init__.py
similarity index 100%
rename from d123/dataset/dataset_specific/carla/__init__.py
rename to d123/datatypes/time/__init__.py
diff --git a/d123/common/datatypes/time/time_point.py b/d123/datatypes/time/time_point.py
similarity index 100%
rename from d123/common/datatypes/time/time_point.py
rename to d123/datatypes/time/time_point.py
diff --git a/d123/dataset/dataset_specific/nuplan/__init__.py b/d123/datatypes/vehicle_state/__init__.py
similarity index 100%
rename from d123/dataset/dataset_specific/nuplan/__init__.py
rename to d123/datatypes/vehicle_state/__init__.py
diff --git a/d123/common/datatypes/vehicle_state/ego_state.py b/d123/datatypes/vehicle_state/ego_state.py
similarity index 96%
rename from d123/common/datatypes/vehicle_state/ego_state.py
rename to d123/datatypes/vehicle_state/ego_state.py
index 79522517..2198cc6b 100644
--- a/d123/common/datatypes/vehicle_state/ego_state.py
+++ b/d123/datatypes/vehicle_state/ego_state.py
@@ -8,21 +8,17 @@
import numpy as np
import numpy.typing as npt
-from d123.common.datatypes.detection.detection import (
- BoxDetectionMetadata,
- BoxDetectionSE2,
- BoxDetectionSE3,
-)
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.vehicle_parameters import (
+from d123.common.utils.enums import classproperty
+from d123.datatypes.detections.detection import BoxDetectionMetadata, BoxDetectionSE2, BoxDetectionSE3
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.vehicle_parameters import (
VehicleParameters,
center_se2_to_rear_axle_se2,
center_se3_to_rear_axle_se3,
rear_axle_se2_to_center_se2,
rear_axle_se3_to_center_se3,
)
-from d123.common.utils.enums import classproperty
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D
# TODO: Find an appropriate way to handle SE2 and SE3 states.
diff --git a/d123/common/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py
similarity index 98%
rename from d123/common/datatypes/vehicle_state/vehicle_parameters.py
rename to d123/datatypes/vehicle_state/vehicle_parameters.py
index 5bf38706..cd9aeb1c 100644
--- a/d123/common/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/datatypes/vehicle_state/vehicle_parameters.py
@@ -4,8 +4,6 @@
from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
-# TODO: Add more vehicle parameters, potentially extend the parameters.
-
@dataclass
class VehicleParameters:
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index d845b66c..678f2cdf 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -4,15 +4,19 @@
BoundingBoxSE3Index,
Corners2DIndex,
Corners3DIndex,
+ EulerAnglesIndex,
EulerStateSE3Index,
Point2DIndex,
Point3DIndex,
+ QuaternionIndex,
StateSE2Index,
+ StateSE3Index,
Vector2DIndex,
Vector3DIndex,
)
from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.point import Point2D, Point3D
from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.rotation import EulerAngles, Quaternion
from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3
from d123.geometry.vector import Vector2D, Vector3D
diff --git a/d123/geometry/test/test_bounding_box.py b/d123/geometry/test/test_bounding_box.py
index e8f44986..a102b4da 100644
--- a/d123/geometry/test/test_bounding_box.py
+++ b/d123/geometry/test/test_bounding_box.py
@@ -4,7 +4,7 @@
import shapely.geometry as geom
from d123.common.utils.mixin import ArrayMixin
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, Point3D, StateSE2, StateSE3
from d123.geometry.geometry_index import (
BoundingBoxSE2Index,
BoundingBoxSE3Index,
@@ -12,7 +12,6 @@
Corners3DIndex,
Point2DIndex,
)
-from d123.geometry.se import StateSE3
class TestBoundingBoxSE2(unittest.TestCase):
diff --git a/d123/geometry/test/test_point.py b/d123/geometry/test/test_point.py
index 94162e93..5c1b30d7 100644
--- a/d123/geometry/test/test_point.py
+++ b/d123/geometry/test/test_point.py
@@ -3,9 +3,8 @@
import numpy as np
-from d123.geometry import Point2D, Point2DIndex
-from d123.geometry.geometry_index import Point3DIndex
-from d123.geometry.point import Point3D
+from d123.geometry import Point2D, Point3D
+from d123.geometry.geometry_index import Point2DIndex, Point3DIndex
class TestPoint2D(unittest.TestCase):
diff --git a/d123/geometry/test/test_polyline.py b/d123/geometry/test/test_polyline.py
index e4103364..d1c1d652 100644
--- a/d123/geometry/test/test_polyline.py
+++ b/d123/geometry/test/test_polyline.py
@@ -3,9 +3,7 @@
import numpy as np
import shapely.geometry as geom
-from d123.geometry.point import Point2D, Point3D
-from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
-from d123.geometry.se import StateSE2
+from d123.geometry import Point2D, Point3D, Polyline2D, Polyline3D, PolylineSE2, StateSE2
class TestPolyline2D(unittest.TestCase):
diff --git a/d123/geometry/transform/test/test_transform_consistency.py b/d123/geometry/transform/test/test_transform_consistency.py
index 1865a3ff..72facda0 100644
--- a/d123/geometry/transform/test/test_transform_consistency.py
+++ b/d123/geometry/transform/test/test_transform_consistency.py
@@ -3,8 +3,8 @@
import numpy as np
import numpy.typing as npt
+from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D
from d123.geometry.geometry_index import EulerStateSE3Index, Point2DIndex, Point3DIndex, StateSE2Index
-from d123.geometry.se import EulerStateSE3, StateSE2
from d123.geometry.transform.transform_euler_se3 import (
convert_absolute_to_relative_euler_se3_array,
convert_absolute_to_relative_points_3d_array,
@@ -24,7 +24,6 @@
translate_se2_along_y,
translate_se2_array_along_body_frame,
)
-from d123.geometry.vector import Vector2D, Vector3D
class TestTransformConsistency(unittest.TestCase):
diff --git a/d123/geometry/transform/test/test_transform_euler_se3.py b/d123/geometry/transform/test/test_transform_euler_se3.py
index f63bc3bd..f20203cc 100644
--- a/d123/geometry/transform/test/test_transform_euler_se3.py
+++ b/d123/geometry/transform/test/test_transform_euler_se3.py
@@ -3,7 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.se import EulerStateSE3
+from d123.geometry import EulerStateSE3, Vector3D
from d123.geometry.transform.transform_euler_se3 import (
convert_absolute_to_relative_euler_se3_array,
convert_absolute_to_relative_points_3d_array,
@@ -14,7 +14,6 @@
translate_euler_se3_along_y,
translate_euler_se3_along_z,
)
-from d123.geometry.vector import Vector3D
class TestTransformEulerSE3(unittest.TestCase):
diff --git a/d123/geometry/transform/test/test_transform_se2.py b/d123/geometry/transform/test/test_transform_se2.py
index 503d87de..cbed45c6 100644
--- a/d123/geometry/transform/test/test_transform_se2.py
+++ b/d123/geometry/transform/test/test_transform_se2.py
@@ -3,7 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.se import StateSE2
+from d123.geometry import StateSE2, Vector2D
from d123.geometry.transform.transform_se2 import (
convert_absolute_to_relative_point_2d_array,
convert_absolute_to_relative_se2_array,
@@ -14,7 +14,6 @@
translate_se2_along_y,
translate_se2_array_along_body_frame,
)
-from d123.geometry.vector import Vector2D
class TestTransformSE2(unittest.TestCase):
diff --git a/d123/geometry/transform/test/test_transform_se3.py b/d123/geometry/transform/test/test_transform_se3.py
index 0a752f68..be936c71 100644
--- a/d123/geometry/transform/test/test_transform_se3.py
+++ b/d123/geometry/transform/test/test_transform_se3.py
@@ -4,10 +4,7 @@
import numpy.typing as npt
import d123.geometry.transform.transform_euler_se3 as euler_transform_se3
-from d123.geometry.geometry_index import EulerStateSE3Index, StateSE3Index
-from d123.geometry.point import Point3D
-from d123.geometry.rotation import Quaternion
-from d123.geometry.se import EulerStateSE3, StateSE3
+from d123.geometry import EulerStateSE3, EulerStateSE3Index, Point3D, Quaternion, StateSE3, StateSE3Index
from d123.geometry.transform.transform_se3 import (
convert_absolute_to_relative_points_3d_array,
convert_absolute_to_relative_se3_array,
diff --git a/d123/geometry/transform/transform_euler_se3.py b/d123/geometry/transform/transform_euler_se3.py
index c2f897d5..516f1ba9 100644
--- a/d123/geometry/transform/transform_euler_se3.py
+++ b/d123/geometry/transform/transform_euler_se3.py
@@ -3,9 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry import EulerStateSE3, EulerStateSE3Index, Vector3D
-from d123.geometry.geometry_index import Point3DIndex, Vector3DIndex
-from d123.geometry.rotation import EulerAngles
+from d123.geometry import EulerAngles, EulerStateSE3, EulerStateSE3Index, Point3DIndex, Vector3D, Vector3DIndex
from d123.geometry.utils.rotation_utils import (
get_rotation_matrices_from_euler_array,
get_rotation_matrix_from_euler_array,
diff --git a/d123/geometry/transform/transform_se2.py b/d123/geometry/transform/transform_se2.py
index c3a5ac6e..48f718fa 100644
--- a/d123/geometry/transform/transform_se2.py
+++ b/d123/geometry/transform/transform_se2.py
@@ -3,10 +3,8 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry.geometry_index import Point2DIndex, Vector2DIndex
-from d123.geometry.se import StateSE2, StateSE2Index
+from d123.geometry import Point2DIndex, StateSE2, StateSE2Index, Vector2D, Vector2DIndex
from d123.geometry.utils.rotation_utils import normalize_angle
-from d123.geometry.vector import Vector2D
def convert_absolute_to_relative_se2_array(
diff --git a/d123/geometry/transform/transform_se3.py b/d123/geometry/transform/transform_se3.py
index e5132fe2..a11fa2e2 100644
--- a/d123/geometry/transform/transform_se3.py
+++ b/d123/geometry/transform/transform_se3.py
@@ -3,9 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.geometry import Vector3D
-from d123.geometry.geometry_index import Point3DIndex, QuaternionIndex, StateSE3Index, Vector3DIndex
-from d123.geometry.se import StateSE3
+from d123.geometry import Point3DIndex, QuaternionIndex, StateSE3, StateSE3Index, Vector3D, Vector3DIndex
from d123.geometry.utils.rotation_utils import (
conjugate_quaternion_array,
get_rotation_matrices_from_quaternion_array,
diff --git a/d123/script/builders/data_converter_builder.py b/d123/script/builders/data_converter_builder.py
index d9c54004..cb7cc83b 100644
--- a/d123/script/builders/data_converter_builder.py
+++ b/d123/script/builders/data_converter_builder.py
@@ -4,7 +4,7 @@
from hydra.utils import instantiate
from omegaconf import DictConfig
-from d123.dataset.dataset_specific.raw_data_converter import RawDataConverter
+from d123.datasets.raw_data_converter import RawDataConverter
from d123.script.builders.utils.utils_type import validate_type
logger = logging.getLogger(__name__)
diff --git a/d123/script/builders/scene_builder_builder.py b/d123/script/builders/scene_builder_builder.py
index 4424b550..148a4792 100644
--- a/d123/script/builders/scene_builder_builder.py
+++ b/d123/script/builders/scene_builder_builder.py
@@ -5,7 +5,7 @@
from hydra.utils import instantiate
from omegaconf import DictConfig
-from d123.dataset.scene.scene_builder import SceneBuilder
+from d123.datatypes.scene.abstract_scene_builder import SceneBuilder
logger = logging.getLogger(__name__)
diff --git a/d123/script/builders/scene_filter_builder.py b/d123/script/builders/scene_filter_builder.py
index af3429a2..512f59ca 100644
--- a/d123/script/builders/scene_filter_builder.py
+++ b/d123/script/builders/scene_filter_builder.py
@@ -4,7 +4,7 @@
from hydra.utils import instantiate
from omegaconf import DictConfig
-from d123.dataset.scene.scene_filter import SceneFilter
+from d123.datatypes.scene.scene_filter import SceneFilter
logger = logging.getLogger(__name__)
diff --git a/d123/script/run_preprocessing.py b/d123/script/run_preprocessing.py
index 4ed6cf97..9e77c514 100644
--- a/d123/script/run_preprocessing.py
+++ b/d123/script/run_preprocessing.py
@@ -9,7 +9,7 @@
from omegaconf import DictConfig
from d123.common.multithreading.worker_utils import worker_map
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.script.builders.scene_builder_builder import build_scene_builder
from d123.script.builders.scene_filter_builder import build_scene_filter
from d123.script.run_dataset_conversion import build_worker
diff --git a/d123/script/run_simulation.py b/d123/script/run_simulation.py
index 393e4367..76ed5e50 100644
--- a/d123/script/run_simulation.py
+++ b/d123/script/run_simulation.py
@@ -10,7 +10,7 @@
from tqdm import tqdm
from d123.common.multithreading.worker_utils import worker_map
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.script.builders.scene_builder_builder import build_scene_builder
from d123.script.builders.scene_filter_builder import build_scene_filter
from d123.script.run_dataset_conversion import build_worker
diff --git a/d123/simulation/agents/abstract_agents.py b/d123/simulation/agents/abstract_agents.py
index 2d7f6cde..14c0f3b3 100644
--- a/d123/simulation/agents/abstract_agents.py
+++ b/d123/simulation/agents/abstract_agents.py
@@ -2,8 +2,8 @@
from typing import List, Optional
from d123.common.datatypes.detection.detection import BoxDetection
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
class AbstractAgents:
diff --git a/d123/simulation/agents/constant_velocity_agents.py b/d123/simulation/agents/constant_velocity_agents.py
index 5201e768..57c48d01 100644
--- a/d123/simulation/agents/constant_velocity_agents.py
+++ b/d123/simulation/agents/constant_velocity_agents.py
@@ -3,8 +3,8 @@
from typing import List, Optional
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.point import Point2D
from d123.geometry.transform.tranform_2d import translate_along_yaw
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index ef8437cf..648a8145 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -7,9 +7,9 @@
from shapely.geometry import CAP_STYLE, Polygon
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.dataset.arrow.conversion import BoxDetectionWrapper
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.point import Point2D
from d123.geometry.polyline import PolylineSE2
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index 960486d0..347f7f7e 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -3,8 +3,8 @@
from typing import Dict, List, Optional
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.point import Point2D
from d123.geometry.polyline import PolylineSE2
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 9d2e2140..4ec342b3 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -6,9 +6,9 @@
from torch_geometric.data import HeteroData
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
-from d123.dataset.arrow.conversion import BoxDetectionWrapper, DetectionType
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper, DetectionType
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.se import StateSE2
from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array
diff --git a/d123/simulation/controller/abstract_controller.py b/d123/simulation/controller/abstract_controller.py
index fc7f4a4f..eaa5aed5 100644
--- a/d123/simulation/controller/abstract_controller.py
+++ b/d123/simulation/controller/abstract_controller.py
@@ -1,7 +1,7 @@
import abc
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/controller/action_controller.py b/d123/simulation/controller/action_controller.py
index 65199590..5e924c78 100644
--- a/d123/simulation/controller/action_controller.py
+++ b/d123/simulation/controller/action_controller.py
@@ -1,7 +1,7 @@
from typing import Optional
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.controller.abstract_controller import AbstractEgoController
from d123.simulation.controller.motion_model.abstract_motion_model import AbstractMotionModel
from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput
diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py
index 54051c31..88eba93f 100644
--- a/d123/simulation/gym/demo_gym_env.py
+++ b/d123/simulation/gym/demo_gym_env.py
@@ -9,9 +9,9 @@
from nuplan.planning.simulation.controller.motion_model.kinematic_bicycle import KinematicBicycleModel
from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.dataset.arrow.conversion import EgoStateSE3
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.arrow.utils.conversion import EgoStateSE3
from d123.simulation.observation.abstract_observation import AbstractObservation
from d123.simulation.observation.agents_observation import AgentsObservation
diff --git a/d123/simulation/gym/environment/helper/environment_cache.py b/d123/simulation/gym/environment/helper/environment_cache.py
index 4ab65f6a..00672828 100644
--- a/d123/simulation/gym/environment/helper/environment_cache.py
+++ b/d123/simulation/gym/environment/helper/environment_cache.py
@@ -14,8 +14,8 @@
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.maps.abstract_map_objects import (
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.maps.abstract_map_objects import (
AbstractCarpark,
AbstractCrosswalk,
AbstractIntersection,
@@ -23,7 +23,7 @@
AbstractLaneGroup,
AbstractStopLine,
)
-from d123.dataset.maps.map_datatypes import MapLayer
+from d123.datasets.maps.map_datatypes import MapLayer
from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.se import StateSE2
from d123.simulation.gym.environment.helper.environment_area import AbstractEnvironmentArea
diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py
index 96672d16..b4953dc3 100644
--- a/d123/simulation/gym/gym_env.py
+++ b/d123/simulation/gym/gym_env.py
@@ -5,8 +5,8 @@
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE2, EgoStateSE2
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry.vector import Vector2D
from d123.simulation.controller.motion_model.kinematic_bicycle_model import KinematicBicycleModel
from d123.simulation.observation.abstract_observation import AbstractObservation
diff --git a/d123/simulation/history/simulation_history.py b/d123/simulation/history/simulation_history.py
index d40b7dc6..b5488780 100644
--- a/d123/simulation/history/simulation_history.py
+++ b/d123/simulation/history/simulation_history.py
@@ -5,7 +5,7 @@
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.planning.planner_output.abstract_planner_output import AbstractPlannerOutput
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/history/simulation_history_buffer.py b/d123/simulation/history/simulation_history_buffer.py
index 47a17ac5..21bfdf7d 100644
--- a/d123/simulation/history/simulation_history_buffer.py
+++ b/d123/simulation/history/simulation_history_buffer.py
@@ -6,7 +6,7 @@
from d123.common.datatypes.recording.abstract_recording import Recording
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
class Simulation2DHistoryBuffer:
diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py
index 7cbd9b92..992c837b 100644
--- a/d123/simulation/metrics/sim_agents/interaction_based.py
+++ b/d123/simulation/metrics/sim_agents/interaction_based.py
@@ -3,7 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.dataset.arrow.conversion import BoxDetectionWrapper
+from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
from d123.geometry.geometry_index import BoundingBoxSE2Index
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array
diff --git a/d123/simulation/metrics/sim_agents/map_based.py b/d123/simulation/metrics/sim_agents/map_based.py
index 134a3c5e..d204eeb3 100644
--- a/d123/simulation/metrics/sim_agents/map_based.py
+++ b/d123/simulation/metrics/sim_agents/map_based.py
@@ -4,9 +4,9 @@
import numpy.typing as npt
import shapely
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.maps.abstract_map_objects import AbstractLane
-from d123.dataset.maps.map_datatypes import MapLayer
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.maps.abstract_map_objects import AbstractLane
+from d123.datasets.maps.map_datatypes import MapLayer
from d123.geometry.geometry_index import BoundingBoxSE2Index, Corners2DIndex, StateSE2Index
from d123.geometry.se import StateSE2
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_corners_array
diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py
index 36033e70..225e6a80 100644
--- a/d123/simulation/metrics/sim_agents/sim_agents.py
+++ b/d123/simulation/metrics/sim_agents/sim_agents.py
@@ -5,8 +5,8 @@
import numpy.typing as npt
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper, DetectionType
-from d123.dataset.maps.abstract_map import AbstractMap
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.maps.abstract_map import AbstractMap
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry.geometry_index import BoundingBoxSE2Index
from d123.simulation.metrics.sim_agents.histogram_metric import (
BinaryHistogramIntersectionMetric,
diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py
index c9d2bb3b..9151ffd6 100644
--- a/d123/simulation/metrics/sim_agents/utils.py
+++ b/d123/simulation/metrics/sim_agents/utils.py
@@ -4,7 +4,7 @@
import numpy.typing as npt
from d123.common.datatypes.detection.detection import BoxDetectionWrapper
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry.geometry_index import BoundingBoxSE2Index
diff --git a/d123/simulation/observation/abstract_observation.py b/d123/simulation/observation/abstract_observation.py
index dfba70af..ae8f0293 100644
--- a/d123/simulation/observation/abstract_observation.py
+++ b/d123/simulation/observation/abstract_observation.py
@@ -5,7 +5,7 @@
from d123.common.datatypes.recording.abstract_recording import Recording
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py
index a31d6d67..be31ce5f 100644
--- a/d123/simulation/observation/agents_observation.py
+++ b/d123/simulation/observation/agents_observation.py
@@ -5,8 +5,8 @@
from d123.common.datatypes.recording.abstract_recording import Recording
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.arrow.conversion import BoxDetectionWrapper
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
from d123.simulation.agents.abstract_agents import AbstractAgents
# from d123.simulation.agents.path_following import PathFollowingAgents
diff --git a/d123/simulation/observation/log_replay_observation.py b/d123/simulation/observation/log_replay_observation.py
index 6e0b6b85..0a986ca3 100644
--- a/d123/simulation/observation/log_replay_observation.py
+++ b/d123/simulation/observation/log_replay_observation.py
@@ -3,7 +3,7 @@
from d123.common.datatypes.recording.abstract_recording import Recording
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.observation.abstract_observation import AbstractObservation
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/planning/abstract_planner.py b/d123/simulation/planning/abstract_planner.py
index 1746d857..3a0f486d 100644
--- a/d123/simulation/planning/abstract_planner.py
+++ b/d123/simulation/planning/abstract_planner.py
@@ -4,7 +4,7 @@
from dataclasses import dataclass
from typing import List
-from d123.dataset.maps.abstract_map import AbstractMap
+from d123.datasets.maps.abstract_map import AbstractMap
from d123.simulation.history.simulation_history_buffer import Simulation2DHistoryBuffer
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py
index 0be93c05..8a4a517a 100644
--- a/d123/simulation/simulation_2d.py
+++ b/d123/simulation/simulation_2d.py
@@ -3,7 +3,7 @@
import logging
from typing import Any, Optional, Tuple, Type
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.callback.abstract_callback import AbstractCallback
from d123.simulation.callback.multi_callback import MultiCallback
from d123.simulation.history.simulation_history import Simulation2DHistory, Simulation2DHistorySample
diff --git a/d123/simulation/time_controller/abstract_time_controller.py b/d123/simulation/time_controller/abstract_time_controller.py
index 788acfcb..1da25641 100644
--- a/d123/simulation/time_controller/abstract_time_controller.py
+++ b/d123/simulation/time_controller/abstract_time_controller.py
@@ -3,7 +3,7 @@
import abc
from typing import Tuple
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.time_controller.simulation_iteration import SimulationIteration
diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py
index b358a24c..363e43e7 100644
--- a/d123/simulation/time_controller/log_time_controller.py
+++ b/d123/simulation/time_controller/log_time_controller.py
@@ -1,6 +1,6 @@
from typing import Optional, Tuple
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.simulation.time_controller.abstract_time_controller import (
AbstractTimeController,
)
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 9ea931bf..439a4830 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -8,14 +8,14 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionWrapper
from d123.common.datatypes.detection.detection_types import DetectionType
from d123.common.visualization.color.default import TrafficLightStatus
-from d123.dataset.maps.abstract_map import MapLayer
-from d123.dataset.maps.abstract_map_objects import (
+from d123.datasets.maps.abstract_map import MapLayer
+from d123.datasets.maps.abstract_map_objects import (
AbstractCarpark,
AbstractCrosswalk,
AbstractGenericDrivable,
AbstractLaneGroup,
)
-from d123.dataset.scene.abstract_scene import AbstractScene
+from d123.datasets.scene.abstract_scene import AbstractScene
from d123.geometry import BoundingBoxSE2, PolylineSE2, StateSE2
from d123.geometry.geometry_index import StateSE2Index
from d123.geometry.transform.transform_se2 import convert_absolute_to_relative_se2_array
From 9ece564ecc780e24baf365fc567879caf3623f2d Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 22 Sep 2025 20:34:57 +0200
Subject: [PATCH 042/145] Few minor fixes and comments
---
.../common/visualization/matplotlib/camera.py | 8 ++---
d123/common/visualization/viser/server.py | 6 ++--
d123/common/visualization/viser/utils.py | 17 ++++------
d123/common/visualization/viser/utils_v2.py | 34 +++----------------
d123/datatypes/detections/detection.py | 18 +++++-----
5 files changed, 28 insertions(+), 55 deletions(-)
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index 6bdee5f6..bc33d0dd 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -10,11 +10,11 @@
# from PIL import ImageColor
from pyquaternion import Quaternion
-from d123.common.datatypes.detection.detection import BoxDetectionSE3, BoxDetectionWrapper
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import Camera
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.sensors.camera import Camera
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index ed5f7a17..6b44ac15 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -5,8 +5,6 @@
import trimesh
import viser
-from d123.common.datatypes.sensor.camera import CameraType
-from d123.common.datatypes.sensor.lidar import LiDARType
from d123.common.visualization.viser.utils import (
get_bounding_box_meshes,
get_camera_if_available,
@@ -15,7 +13,9 @@
get_map_meshes,
)
from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines
-from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.lidar import LiDARType
# TODO: Try to fix performance issues.
# TODO: Refactor this file.
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index af209718..8c6236ce 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -1,20 +1,18 @@
-from typing import List, Optional, Tuple
+from typing import Final, List, Optional, Tuple
import numpy as np
import numpy.typing as npt
import trimesh
-from pyquaternion import Quaternion
-from typing_extensions import Final
+from pyquaternion import Quaternion # TODO: remove
-# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
-from d123.common.datatypes.sensor.camera import Camera, CameraType
-from d123.common.datatypes.sensor.lidar import LiDARType
from d123.common.visualization.color.color import TAB_10, Color
from d123.common.visualization.color.config import PlotConfig
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG
-from d123.datasets.maps.abstract_map import MapLayer
-from d123.datasets.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
-from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.maps.abstract_map import MapLayer
+from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.sensors.camera import Camera, CameraType
+from d123.datatypes.sensors.lidar import LiDARType
from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
@@ -103,7 +101,6 @@ def get_map_meshes(scene: AbstractScene):
]
map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers)
- print(map_objects_dict.keys())
output = {}
for map_layer in map_objects_dict.keys():
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
index 9b4fd8d8..16a1a15b 100644
--- a/d123/common/visualization/viser/utils_v2.py
+++ b/d123/common/visualization/viser/utils_v2.py
@@ -3,43 +3,18 @@
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG
from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR
-from d123.datasets.scene.abstract_scene import AbstractScene
-
-# from d123.common.datatypes.sensor.camera_parameters import get_nuplan_camera_parameters
-from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Vector3D
-from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
-def _get_bounding_box_corners(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]:
- """
- Get the vertices of a bounding box in 3D space.
- """
- corner_extent_factors = {
- Corners3DIndex.FRONT_LEFT_BOTTOM: Vector3D(+0.5, -0.5, -0.5),
- Corners3DIndex.FRONT_RIGHT_BOTTOM: Vector3D(+0.5, +0.5, -0.5),
- Corners3DIndex.BACK_RIGHT_BOTTOM: Vector3D(-0.5, +0.5, -0.5),
- Corners3DIndex.BACK_LEFT_BOTTOM: Vector3D(-0.5, -0.5, -0.5),
- Corners3DIndex.FRONT_LEFT_TOP: Vector3D(+0.5, -0.5, +0.5),
- Corners3DIndex.FRONT_RIGHT_TOP: Vector3D(+0.5, +0.5, +0.5),
- Corners3DIndex.BACK_RIGHT_TOP: Vector3D(-0.5, +0.5, +0.5),
- Corners3DIndex.BACK_LEFT_TOP: Vector3D(-0.5, -0.5, +0.5),
- }
- corners = np.zeros((len(Corners3DIndex), len(Point3DIndex)), dtype=np.float64)
- bounding_box_extent = np.array([bounding_box.length, bounding_box.width, bounding_box.height], dtype=np.float64)
- for idx, vec in corner_extent_factors.items():
- vector_3d = Vector3D.from_array(bounding_box_extent * vec.array)
- corners[idx] = translate_euler_se3_along_body_frame(bounding_box.center, vector_3d).point_3d.array
- return corners
-
-
def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]:
"""
- Get the edges of a bounding box in 3D space as a Polyline3D.
+    TODO: Vectorize this function and move it to the geometry module.
"""
- corners = _get_bounding_box_corners(bounding_box)
+ corners = bounding_box.corners_array
index_pairs = [
(Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
(Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
@@ -62,6 +37,7 @@ def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.floa
def translate_points_3d(points_3d: npt.NDArray[np.float64], point_3d: Point3D) -> npt.NDArray[np.float64]:
+ # TODO: remove
return points_3d - point_3d.array
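The refactor above replaces the hand-rolled corner computation with bounding_box.corners_array; a standalone numpy sketch of the remaining edge construction, assuming an (8, 3) corner array ordered like Corners3DIndex (bottom corners at indices 0-3, top corners at 4-7):

import numpy as np

corners = np.random.rand(8, 3)  # stand-in for bounding_box.corners_array

# 12 box edges as (start, end) corner-index pairs: bottom face, top face, verticals.
index_pairs = [
    (0, 1), (1, 2), (2, 3), (3, 0),
    (4, 5), (5, 6), (6, 7), (7, 4),
    (0, 4), (1, 5), (2, 6), (3, 7),
]
lines = np.stack([corners[[i, j]] for i, j in index_pairs])
assert lines.shape == (12, 2, 3)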
diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py
index bf855679..36747b59 100644
--- a/d123/datatypes/detections/detection.py
+++ b/d123/datatypes/detections/detection.py
@@ -1,13 +1,13 @@
from dataclasses import dataclass
from functools import cached_property
-from typing import Iterable
+from typing import Iterable, Optional, Union
import shapely
from d123.common.utils.enums import SerialIntEnum
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.time.time_point import TimePoint
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, OccupancyMap2D, StateSE2, Vector2D, Vector3D
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, OccupancyMap2D, StateSE2, StateSE3, Vector2D, Vector3D
@dataclass
@@ -16,7 +16,7 @@ class BoxDetectionMetadata:
detection_type: DetectionType
timepoint: TimePoint
track_token: str
- confidence: float | None = None
+ confidence: Optional[float] = None
@dataclass
@@ -44,18 +44,18 @@ class BoxDetectionSE3:
metadata: BoxDetectionMetadata
bounding_box_se3: BoundingBoxSE3
- velocity: Vector3D | None = None
+ velocity: Optional[Vector3D] = None
@property
def shapely_polygon(self) -> shapely.geometry.Polygon:
return self.bounding_box_se3.shapely_polygon
@property
- def center(self) -> EulerStateSE3:
+ def center(self) -> StateSE3:
return self.bounding_box_se3.center
@property
- def center_se3(self) -> EulerStateSE3:
+ def center_se3(self) -> StateSE3:
return self.bounding_box_se3.center_se3
@property
@@ -75,7 +75,7 @@ def box_detection_se2(self) -> BoxDetectionSE2:
)
-BoxDetection = BoxDetectionSE2 | BoxDetectionSE3
+BoxDetection = Union[BoxDetectionSE2, BoxDetectionSE3]
@dataclass
@@ -143,8 +143,8 @@ def __len__(self) -> int:
def __iter__(self):
return iter(self.traffic_light_detections)
- def get_detection_by_lane_id(self, lane_id: int) -> TrafficLightDetection | None:
- traffic_light_detection: TrafficLightDetection | None = None
+ def get_detection_by_lane_id(self, lane_id: int) -> Optional[TrafficLightDetection]:
+ traffic_light_detection: Optional[TrafficLightDetection] = None
for detection in self.traffic_light_detections:
if int(detection.lane_id) == int(lane_id):
traffic_light_detection = detection
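For context on the typing changes in this patch's detection.py hunks: the `X | None` union syntax requires Python 3.10+ (PEP 604), whereas typing.Optional/typing.Union keep the annotations importable on older interpreters:

from typing import Optional, Union

confidence: Optional[float] = None  # same meaning as `float | None` on 3.10+
Number = Union[int, float]          # same meaning as `int | float` on 3.10+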
From 3d4f9113418daf24c7562a7fbfbe39696557a9ed Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 23 Sep 2025 21:42:44 +0200
Subject: [PATCH 043/145] Minor refactors and adding quaternion based SE3 in
more datatypes (#43)
---
.pre-commit-config.yaml | 1 +
d123/datatypes/detections/detection.py | 4 +-
d123/datatypes/vehicle_state/ego_state.py | 59 +++++++++++--------
.../vehicle_state/vehicle_parameters.py | 12 ++--
d123/geometry/__init__.py | 12 ++--
d123/geometry/geometry_index.py | 10 +++-
d123/geometry/se.py | 45 +++++++++++---
d123/geometry/utils/rotation_utils.py | 1 +
8 files changed, 96 insertions(+), 48 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 251ae7fb..cfcbd787 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -24,6 +24,7 @@ repos:
- id: isort
name: isort (python)
args: ["--profile", "black", "--filter-files", '--line-length', '120']
+ exclude: __init__.py$
- repo: https://github.com/ambv/black
rev: 25.1.0
hooks:
diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py
index 36747b59..29da95d7 100644
--- a/d123/datatypes/detections/detection.py
+++ b/d123/datatypes/detections/detection.py
@@ -1,6 +1,6 @@
from dataclasses import dataclass
from functools import cached_property
-from typing import Iterable, Optional, Union
+from typing import Iterable, List, Optional, Union
import shapely
@@ -92,7 +92,7 @@ def __len__(self) -> int:
def __iter__(self):
return iter(self.box_detections)
- def get_box_detections_by_types(self, detection_types: Iterable[DetectionType]) -> list[BoxDetection]:
+ def get_box_detections_by_types(self, detection_types: Iterable[DetectionType]) -> List[BoxDetection]:
return [detection for detection in self.box_detections if detection.metadata.detection_type in detection_types]
def get_detection_by_track_token(self, track_token: str) -> BoxDetection | None:
diff --git a/d123/datatypes/vehicle_state/ego_state.py b/d123/datatypes/vehicle_state/ego_state.py
index 2198cc6b..cc1aab7d 100644
--- a/d123/datatypes/vehicle_state/ego_state.py
+++ b/d123/datatypes/vehicle_state/ego_state.py
@@ -19,43 +19,51 @@
rear_axle_se2_to_center_se2,
rear_axle_se3_to_center_se3,
)
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, EulerStateSE3, StateSE2, Vector2D, Vector3D
-
-# TODO: Find an appropriate way to handle SE2 and SE3 states.
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, StateSE2, StateSE3, Vector2D, Vector3D
EGO_TRACK_TOKEN: Final[str] = "ego_vehicle"
class EgoStateSE3Index(IntEnum):
+
X = 0
Y = 1
Z = 2
- ROLL = 3
- PITCH = 4
- YAW = 5
- VELOCITY_X = 6
- VELOCITY_Y = 7
- VELOCITY_Z = 8
- ACCELERATION_X = 9
- ACCELERATION_Y = 10
- ACCELERATION_Z = 11
- ANGULAR_VELOCITY_X = 12
- ANGULAR_VELOCITY_Y = 13
- ANGULAR_VELOCITY_Z = 14
+ QW = 3
+ QX = 4
+ QY = 5
+ QZ = 6
+ VELOCITY_X = 7
+ VELOCITY_Y = 8
+ VELOCITY_Z = 9
+ ACCELERATION_X = 10
+ ACCELERATION_Y = 11
+ ACCELERATION_Z = 12
+ ANGULAR_VELOCITY_X = 13
+ ANGULAR_VELOCITY_Y = 14
+ ANGULAR_VELOCITY_Z = 15
@classproperty
- def SE3(cls) -> slice:
- return slice(cls.X, cls.YAW + 1)
+ def STATE_SE3(cls) -> slice:
+ return slice(cls.X, cls.QZ + 1)
@classproperty
def DYNAMIC_VEHICLE_STATE(cls) -> slice:
return slice(cls.VELOCITY_X, cls.ANGULAR_VELOCITY_Z + 1)
+ @classproperty
+ def SCALAR(cls) -> slice:
+ return slice(cls.QW, cls.QW + 1)
+
+ @classproperty
+ def VECTOR(cls) -> slice:
+ return slice(cls.QX, cls.QZ + 1)
+
@dataclass
class EgoStateSE3:
- center_se3: EulerStateSE3
+ center_se3: StateSE3
dynamic_state_se3: DynamicStateSE3
vehicle_parameters: VehicleParameters
timepoint: Optional[TimePoint] = None
@@ -68,14 +76,14 @@ def from_array(
vehicle_parameters: VehicleParameters,
timepoint: Optional[TimePoint] = None,
) -> EgoStateSE3:
- state_se3 = EulerStateSE3.from_array(array[EgoStateSE3Index.SE3])
+ state_se3 = StateSE3.from_array(array[EgoStateSE3Index.STATE_SE3])
dynamic_state = DynamicStateSE3.from_array(array[EgoStateSE3Index.DYNAMIC_VEHICLE_STATE])
return EgoStateSE3(state_se3, dynamic_state, vehicle_parameters, timepoint)
@classmethod
def from_rear_axle(
cls,
- rear_axle_se3: EulerStateSE3,
+ rear_axle_se3: StateSE3,
dynamic_state_se3: DynamicStateSE3,
vehicle_parameters: VehicleParameters,
time_point: TimePoint,
@@ -96,7 +104,7 @@ def array(self) -> npt.NDArray[np.float64]:
Convert the EgoVehicleState to an array.
:return: An array containing the bounding box and dynamic state information.
"""
- assert isinstance(self.center_se3, EulerStateSE3)
+ assert isinstance(self.center_se3, StateSE3)
assert isinstance(self.dynamic_state_se3, DynamicStateSE3)
center_array = self.center_se3.array
@@ -105,11 +113,11 @@ def array(self) -> npt.NDArray[np.float64]:
return np.concatenate((center_array, dynamic_array), axis=0)
@property
- def center(self) -> EulerStateSE3:
+ def center(self) -> StateSE3:
return self.center_se3
@property
- def rear_axle_se3(self) -> EulerStateSE3:
+ def rear_axle_se3(self) -> StateSE3:
return center_se3_to_rear_axle_se3(center_se3=self.center_se3, vehicle_parameters=self.vehicle_parameters)
@property
@@ -117,7 +125,7 @@ def rear_axle_se2(self) -> StateSE2:
return self.rear_axle_se3.state_se2
@property
- def rear_axle(self) -> EulerStateSE3:
+ def rear_axle(self) -> StateSE3:
return self.rear_axle_se3
@cached_property
@@ -265,6 +273,8 @@ def ANGULAR_VELOCITY(cls) -> slice:
@dataclass
class DynamicStateSE3:
+    # TODO: Make class array-like
+
velocity: Vector3D
acceleration: Vector3D
angular_velocity: Vector3D
@@ -322,6 +332,7 @@ def dynamic_state_se2(self) -> DynamicStateSE2:
@dataclass
class DynamicStateSE2:
+
velocity: Vector2D
acceleration: Vector2D
angular_velocity: float
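With the index changes above, the flat ego-state array grows from 15 to 16 entries and the pose block now stores a quaternion; a minimal numpy sketch of the new layout (values are illustrative):

import numpy as np

state = np.zeros(16, dtype=np.float64)
state[0:3] = [10.0, -2.0, 0.5]     # X, Y, Z
state[3:7] = [1.0, 0.0, 0.0, 0.0]  # QW, QX, QY, QZ (identity rotation)
state[7:10] = [5.0, 0.0, 0.0]      # VELOCITY_X..VELOCITY_Z

pose = state[0:7]       # EgoStateSE3Index.STATE_SE3 == slice(X, QZ + 1)
dynamics = state[7:16]  # EgoStateSE3Index.DYNAMIC_VEHICLE_STATE
assert pose.shape == (7,) and dynamics.shape == (9,)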
diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py
index cd9aeb1c..0d7f3d01 100644
--- a/d123/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,8 +1,8 @@
from dataclasses import dataclass
-from d123.geometry import EulerStateSE3, StateSE2, Vector2D, Vector3D
-from d123.geometry.transform.transform_euler_se3 import translate_euler_se3_along_body_frame
+from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
+from d123.geometry.transform.transform_se3 import translate_se3_along_body_frame
@dataclass
@@ -74,14 +74,14 @@ def get_av2_ford_fusion_hybrid_parameters() -> VehicleParameters:
)
-def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
+def center_se3_to_rear_axle_se3(center_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
"""
Converts a center state to a rear axle state.
:param center_se3: The center state.
:param vehicle_parameters: The vehicle parameters.
:return: The rear axle state.
"""
- return translate_euler_se3_along_body_frame(
+ return translate_se3_along_body_frame(
center_se3,
Vector3D(
-vehicle_parameters.rear_axle_to_center_longitudinal,
@@ -91,14 +91,14 @@ def center_se3_to_rear_axle_se3(center_se3: EulerStateSE3, vehicle_parameters: V
)
-def rear_axle_se3_to_center_se3(rear_axle_se3: EulerStateSE3, vehicle_parameters: VehicleParameters) -> EulerStateSE3:
+def rear_axle_se3_to_center_se3(rear_axle_se3: StateSE3, vehicle_parameters: VehicleParameters) -> StateSE3:
"""
Converts a rear axle state to a center state.
:param rear_axle_se3: The rear axle state.
:param vehicle_parameters: The vehicle parameters.
:return: The center state.
"""
- return translate_euler_se3_along_body_frame(
+ return translate_se3_along_body_frame(
rear_axle_se3,
Vector3D(
vehicle_parameters.rear_axle_to_center_longitudinal,
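A worked 2D analogue of the conversions above: the rear axle sits rear_axle_to_center_longitudinal metres behind the center along the body x-axis, so translating the center pose by the negated offset recovers it (the 1.5 m offset below is hypothetical):

import math

x_c, y_c, yaw = 10.0, 5.0, math.pi / 2  # center pose, facing +y
offset = 1.5                            # hypothetical rear_axle_to_center_longitudinal

x_r = x_c - offset * math.cos(yaw)      # translate along the body frame by -offset
y_r = y_c - offset * math.sin(yaw)
assert (round(x_r, 6), round(y_r, 6)) == (10.0, 3.5)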
diff --git a/d123/geometry/__init__.py b/d123/geometry/__init__.py
index 678f2cdf..e022c86a 100644
--- a/d123/geometry/__init__.py
+++ b/d123/geometry/__init__.py
@@ -1,22 +1,22 @@
-from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
from d123.geometry.geometry_index import (
+ Point2DIndex,
+ Point3DIndex,
BoundingBoxSE2Index,
BoundingBoxSE3Index,
Corners2DIndex,
Corners3DIndex,
EulerAnglesIndex,
EulerStateSE3Index,
- Point2DIndex,
- Point3DIndex,
QuaternionIndex,
StateSE2Index,
StateSE3Index,
Vector2DIndex,
Vector3DIndex,
)
-from d123.geometry.occupancy_map import OccupancyMap2D
from d123.geometry.point import Point2D, Point3D
-from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.vector import Vector2D, Vector3D
from d123.geometry.rotation import EulerAngles, Quaternion
from d123.geometry.se import EulerStateSE3, StateSE2, StateSE3
-from d123.geometry.vector import Vector2D, Vector3D
+from d123.geometry.bounding_box import BoundingBoxSE2, BoundingBoxSE3
+from d123.geometry.polyline import Polyline2D, Polyline3D, PolylineSE2
+from d123.geometry.occupancy_map import OccupancyMap2D
diff --git a/d123/geometry/geometry_index.py b/d123/geometry/geometry_index.py
index 1da7c945..aa42cc4b 100644
--- a/d123/geometry/geometry_index.py
+++ b/d123/geometry/geometry_index.py
@@ -157,7 +157,7 @@ def QUATERNION(cls) -> slice:
@classproperty
def SCALAR(cls) -> slice:
- return cls.QW
+ return slice(cls.QW, cls.QW + 1)
@classproperty
def VECTOR(cls) -> slice:
@@ -234,6 +234,14 @@ def QUATERNION(cls) -> slice:
def EXTENT(cls) -> slice:
return slice(cls.LENGTH, cls.HEIGHT + 1)
+ @classproperty
+ def SCALAR(cls) -> slice:
+ return slice(cls.QW, cls.QW + 1)
+
+ @classproperty
+ def VECTOR(cls) -> slice:
+ return slice(cls.QX, cls.QZ + 1)
+
class Corners3DIndex(IntEnum):
"""
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 6538bfe9..a35b9c4c 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -253,6 +253,30 @@ def euler_angles(self) -> EulerAngles:
"""
return self.quaternion.euler_angles
+ @property
+ def roll(self) -> float:
+ """The roll (x-axis rotation) angle in radians.
+
+ :return: The roll angle in radians.
+ """
+ return self.euler_angles.roll
+
+ @property
+ def pitch(self) -> float:
+ """The pitch (y-axis rotation) angle in radians.
+
+ :return: The pitch angle in radians.
+ """
+ return self.euler_angles.pitch
+
+ @property
+ def yaw(self) -> float:
+ """The yaw (z-axis rotation) angle in radians.
+
+ :return: The yaw angle in radians.
+ """
+ return self.euler_angles.yaw
+
@property
def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""Returns the 3x3 rotation matrix representation of the state's orientation.
@@ -261,11 +285,22 @@ def rotation_matrix(self) -> npt.NDArray[np.float64]:
"""
return self.quaternion.rotation_matrix
+ @property
+ def transformation_matrix(self) -> npt.NDArray[np.float64]:
+ """Returns the 4x4 transformation matrix representation of the state.
+
+ :return: A 4x4 numpy array representing the transformation matrix.
+ """
+ transformation_matrix = np.eye(4, dtype=np.float64)
+ transformation_matrix[:3, :3] = self.rotation_matrix
+ transformation_matrix[:3, 3] = self.array[StateSE3Index.XYZ]
+ return transformation_matrix
+
class EulerStateSE3(ArrayMixin):
"""
    Class to represent a 3D pose as SE3 (x, y, z, roll, pitch, yaw).
- TODO: Use quaternions for rotation representation.
+    NOTE: This class is deprecated; use the quaternion-based :class:`~d123.geometry.StateSE3` instead.
"""
_array: npt.NDArray[np.float64]
@@ -450,11 +485,3 @@ def __iter__(self) -> Iterable[float]:
def __hash__(self) -> int:
"""Hash method"""
return hash((self.x, self.y, self.z, self.roll, self.pitch, self.yaw))
-
- def __matmul__(self, other: EulerStateSE3) -> EulerStateSE3:
- """Combines two SE3 states by applying the transformation of the other state to this state.
-
- :param other: Another StateSE3 instance representing the transformation to apply.
- :return: A new StateSE3 instance representing the combined transformation.
- """
- return EulerStateSE3.from_transformation_matrix(self.transformation_matrix @ other.transformation_matrix)
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index 3d3248ce..499b98bc 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -178,6 +178,7 @@ def get_rotation_matrices_from_quaternion_array(quaternion_array: npt.NDArray[np
def get_rotation_matrix_from_quaternion_array(quaternion_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ # TODO: Check if this function is necessary or batch-wise function is universally applicable
assert quaternion_array.ndim == 1 and quaternion_array.shape[0] == len(QuaternionIndex)
return get_rotation_matrices_from_quaternion_array(quaternion_array[None, :])[0]
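Note: the TODO above asks whether the single-quaternion wrapper is needed, given that it just promotes its input to a batch of one and takes element 0. For reference, an equivalent conversion with scipy; a sketch, not the project's own implementation (scipy expects xyzw order, while the arrays here are wxyz-first):

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    q_wxyz = np.array([0.7071068, 0.0, 0.0, 0.7071068])  # 90 deg about z
    rot = R.from_quat(np.roll(q_wxyz, -1)).as_matrix()   # reorder to (x, y, z, w)
    assert np.allclose(rot @ [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], atol=1e-6)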
From e67fd6b737d881d524f114d35c0f1f0e0d67c546 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 23 Sep 2025 22:37:41 +0200
Subject: [PATCH 044/145] Unfinished updating of conversion scripts to quats
(#43)
---
d123/common/utils/dependencies.py | 4 +-
d123/common/visualization/matplotlib/utils.py | 4 +-
d123/common/visualization/viser/utils.py | 4 +-
d123/datasets/av2/av2_data_converter.py | 82 ++++++-------------
d123/datasets/nuplan/nuplan_data_converter.py | 33 ++++----
d123/datasets/wopd/wopd_data_converter.py | 10 +--
6 files changed, 48 insertions(+), 89 deletions(-)
diff --git a/d123/common/utils/dependencies.py b/d123/common/utils/dependencies.py
index d750061e..547947e3 100644
--- a/d123/common/utils/dependencies.py
+++ b/d123/common/utils/dependencies.py
@@ -2,8 +2,8 @@
def check_dependencies(modules: Union[str, List[str]], optional_name: str) -> None:
- """
- Checks if the given modules can be imported, otherwise raises an ImportError with a message
+ """Checks if the given modules can be imported, otherwise raises an ImportError with a message
+
:param modules: Module name or list of module names to check
:param optional_name: Name of the optional feature
:raises ImportError: If any of the modules cannot be imported
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 1742a864..9e030b80 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -9,7 +9,7 @@
from matplotlib.path import Path
from d123.common.visualization.color.config import PlotConfig
-from d123.geometry import EulerStateSE3, StateSE2
+from d123.geometry import StateSE2, StateSE3
def add_shapely_polygon_to_ax(
@@ -114,7 +114,7 @@ def get_pose_triangle(size: float) -> geom.Polygon:
def shapely_geometry_local_coords(
- geometry: geom.base.BaseGeometry, origin: Union[StateSE2, EulerStateSE3]
+ geometry: geom.base.BaseGeometry, origin: Union[StateSE2, StateSE3]
) -> geom.base.BaseGeometry:
"""Helper for transforming shapely geometry in coord-frame"""
# TODO: move somewhere else for general use
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 8c6236ce..783efa01 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -13,7 +13,7 @@
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.sensors.camera import Camera, CameraType
from d123.datatypes.sensors.lidar import LiDARType
-from d123.geometry import BoundingBoxSE3, EulerStateSE3, Point3D, Polyline3D
+from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
@@ -226,7 +226,7 @@ def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> T
rear_axle_array = rear_axle.array
rear_axle_array[:3] -= initial_point_3d.array
- rear_axle = EulerStateSE3.from_array(rear_axle_array)
+ rear_axle = StateSE3.from_array(rear_axle_array)
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index 11e2549b..e13bbf04 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -9,7 +9,6 @@
import numpy as np
import pandas as pd
import pyarrow as pa
-from pyquaternion import Quaternion
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.datasets.av2.av2_constants import (
@@ -34,12 +33,8 @@
get_av2_ford_fusion_hybrid_parameters,
rear_axle_se3_to_center_se3,
)
-from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_euler_se3 import (
- convert_relative_to_absolute_euler_se3_array,
- get_rotation_matrix,
-)
-from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
def create_token(input_data: str) -> str:
@@ -382,35 +377,19 @@ def _extract_box_detections(
for detection_idx, (_, row) in enumerate(annotations_slice.iterrows()):
row = row.to_dict()
- yaw, pitch, roll = Quaternion(
- w=row["qw"],
- x=row["qx"],
- y=row["qy"],
- z=row["qz"],
- ).yaw_pitch_roll
-
- detections_state[detection_idx, BoundingBoxSE3Index.X] = row["tx_m"]
- detections_state[detection_idx, BoundingBoxSE3Index.Y] = row["ty_m"]
- detections_state[detection_idx, BoundingBoxSE3Index.Z] = row["tz_m"]
- detections_state[detection_idx, BoundingBoxSE3Index.ROLL] = roll
- detections_state[detection_idx, BoundingBoxSE3Index.PITCH] = pitch
- detections_state[detection_idx, BoundingBoxSE3Index.YAW] = yaw
- detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = row["length_m"]
- detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = row["width_m"]
- detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = row["height_m"]
+
+ detections_state[detection_idx, BoundingBoxSE3Index.XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]]
+ detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]]
+ detections_state[detection_idx, BoundingBoxSE3Index.EXTENT] = [row["length_m"], row["width_m"], row["height_m"]]
av2_detection_type = AV2SensorBoxDetectionType.deserialize(row["category"])
detections_types.append(int(AV2_TO_DETECTION_TYPE[av2_detection_type]))
- detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
- origin=ego_state_se3.rear_axle_se3, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
+ detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
+ origin=ego_state_se3.rear_axle_se3,
+ se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3],
)
- ZERO_BOX_ROLL_PITCH = False # TODO: Add config option or remove
- if ZERO_BOX_ROLL_PITCH:
- detections_state[:, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL
- detections_state[:, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH
-
return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types
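Note: the rewrite above replaces nine per-scalar writes with one write per named slice. A minimal sketch of the pattern, with a hypothetical 10-wide layout mirroring BoundingBoxSE3Index (x, y, z, qw, qx, qy, qz, length, width, height):

    import numpy as np

    XYZ, QUATERNION, EXTENT = slice(0, 3), slice(3, 7), slice(7, 10)  # hypothetical layout
    row = {"tx_m": 1.0, "ty_m": 2.0, "tz_m": 0.5, "qw": 1.0, "qx": 0.0, "qy": 0.0,
           "qz": 0.0, "length_m": 4.5, "width_m": 1.9, "height_m": 1.6}

    state = np.zeros(10, dtype=np.float64)
    state[XYZ] = [row["tx_m"], row["ty_m"], row["tz_m"]]
    state[QUATERNION] = [row["qw"], row["qx"], row["qy"], row["qz"]]
    state[EXTENT] = [row["length_m"], row["width_m"], row["height_m"]]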
@@ -421,26 +400,19 @@ def _extract_ego_state(city_se3_egovehicle_df: pd.DataFrame, lidar_timestamp_ns:
), f"Expected exactly one ego state for timestamp {lidar_timestamp_ns}, got {len(ego_state_slice)}."
ego_pose_dict = ego_state_slice.iloc[0].to_dict()
-
- ego_pose_quat = Quaternion(
- w=ego_pose_dict["qw"],
- x=ego_pose_dict["qx"],
- y=ego_pose_dict["qy"],
- z=ego_pose_dict["qz"],
- )
-
- yaw, pitch, roll = ego_pose_quat.yaw_pitch_roll
-
- rear_axle_pose = EulerStateSE3(
+ rear_axle_pose = StateSE3(
x=ego_pose_dict["tx_m"],
y=ego_pose_dict["ty_m"],
z=ego_pose_dict["tz_m"],
- roll=roll,
- pitch=pitch,
- yaw=yaw,
+ qw=ego_pose_dict["qw"],
+ qx=ego_pose_dict["qx"],
+ qy=ego_pose_dict["qy"],
+ qz=ego_pose_dict["qz"],
)
- vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() # TODO: Add av2 vehicle parameters
+
+ vehicle_parameters = get_av2_ford_fusion_hybrid_parameters()
center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
+
# TODO: Add script to calculate the dynamic state from log sequence.
dynamic_state = DynamicStateSE3(
velocity=Vector3D(
@@ -495,9 +467,8 @@ def _extract_camera(
source_dataset_dir = source_log_path.parent.parent
rear_axle_se3 = ego_state_se3.rear_axle_se3
- ego_transform = np.zeros((4, 4), dtype=np.float64)
- ego_transform[:3, :3] = get_rotation_matrix(ego_state_se3.rear_axle_se3)
- ego_transform[:3, 3] = rear_axle_se3.point_3d.array
+ ego_transform = rear_axle_se3.transformation_matrix
+ ego_transform # TODO: Refactor this file, i.e. why is the ego transform computed but never used?
for _, row in egovehicle_se3_sensor_df.iterrows():
row = row.to_dict()
@@ -521,16 +492,13 @@ def _extract_camera(
absolute_image_path = source_dataset_dir / relative_image_path
assert absolute_image_path.exists()
# TODO: Adjust for finer IMU timestamps to correct the camera extrinsic.
- camera_extrinsic = np.eye(4, dtype=np.float64)
- camera_extrinsic[:3, :3] = Quaternion(
- w=row["qw"],
- x=row["qx"],
- y=row["qy"],
- z=row["qz"],
- ).rotation_matrix
- camera_extrinsic[:3, 3] = np.array([row["tx_m"], row["ty_m"], row["tz_m"]], dtype=np.float64)
+
+ camera_extrinsic = StateSE3(
+ x=row["tx_m"], y=row["ty_m"], z=row["tz_m"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]
+ )
+
# camera_extrinsic = camera_extrinsic @ ego_transform
- camera_extrinsic = camera_extrinsic.flatten().tolist()
+ camera_extrinsic = camera_extrinsic.transformation_matrix.flatten().tolist()
if data_converter_config.camera_store_option == "path":
camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic)
diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
index a7386917..d03dbaf9 100644
--- a/d123/datasets/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -30,7 +30,8 @@
get_nuplan_chrysler_pacifica_parameters,
rear_axle_se3_to_center_se3,
)
-from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, EulerStateSE3, Vector3D, Vector3DIndex
+from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.rotation import EulerAngles
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
check_dependencies(["nuplan", "sqlalchemy"], "nuplan")
@@ -365,17 +366,19 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List
for lidar_box in lidar_pc.lidar_boxes:
lidar_box: LidarBox
- center = EulerStateSE3(
+ lidar_quaternion = EulerAngles(roll=DEFAULT_ROLL, pitch=DEFAULT_PITCH, yaw=lidar_box.yaw).quaternion
+ center = StateSE3(
x=lidar_box.x,
y=lidar_box.y,
z=lidar_box.z,
- roll=DEFAULT_ROLL,
- pitch=DEFAULT_PITCH,
- yaw=lidar_box.yaw,
+ qw=lidar_quaternion.qw,
+ qx=lidar_quaternion.qx,
+ qy=lidar_quaternion.qy,
+ qz=lidar_quaternion.qz,
)
bounding_box_se3 = BoundingBoxSE3(center, lidar_box.length, lidar_box.width, lidar_box.height)
- detections_state.append(bounding_box_se3.array)
+ detections_state.append(bounding_box_se3.tolist())
detections_velocity.append(lidar_box.velocity)
detections_token.append(lidar_box.track_token)
detections_types.append(int(NUPLAN_DETECTION_NAME_DICT[lidar_box.category.name]))
@@ -385,19 +388,16 @@ def _extract_detections(lidar_pc: LidarPc) -> Tuple[List[List[float]], List[List
def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
- yaw, pitch, roll = lidar_pc.ego_pose.quaternion.yaw_pitch_roll
vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
- # vehicle_parameters = get_pacifica_parameters()
-
- rear_axle_pose = EulerStateSE3(
+ rear_axle_pose = StateSE3(
x=lidar_pc.ego_pose.x,
y=lidar_pc.ego_pose.y,
z=lidar_pc.ego_pose.z,
- roll=roll,
- pitch=pitch,
- yaw=yaw,
+ qw=lidar_pc.ego_pose.qw,
+ qx=lidar_pc.ego_pose.qx,
+ qy=lidar_pc.ego_pose.qy,
+ qz=lidar_pc.ego_pose.qz,
)
- # NOTE: The height to rear axle is not provided the dataset and is merely approximated.
center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
dynamic_state = DynamicStateSE3(
velocity=Vector3D(
@@ -416,7 +416,6 @@ def _extract_ego_state(lidar_pc: LidarPc) -> List[float]:
z=lidar_pc.ego_pose.angular_rate_z,
),
)
-
return EgoStateSE3(
center_se3=center,
dynamic_state_se3=dynamic_state,
@@ -436,7 +435,6 @@ def _extract_traffic_lights(log_db: NuPlanDB, lidar_pc_token: str) -> Tuple[List
def _extract_scenario_tag(log_db: NuPlanDB, lidar_pc_token: str) -> List[str]:
-
scenario_tags = [
scenario_tag.type for scenario_tag in log_db.scenario_tag.select_many(lidar_pc_token=lidar_pc_token)
]
@@ -454,7 +452,6 @@ def _extract_camera(
camera_dict: Dict[str, Union[str, bytes]] = {}
sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
-
log_cam_infos = {camera.token: camera for camera in log_db.log.cameras}
for camera_type, camera_channel in NUPLAN_CAMERA_TYPES.items():
@@ -468,7 +465,7 @@ def _extract_camera(
# Code taken from MTGS
# https://github.com/OpenDriveLab/MTGS/blob/main/nuplan_scripts/utils/nuplan_utils_custom.py#L117
-
+ # TODO: Refactor
timestamp = image.timestamp + NUPLAN_ROLLING_SHUTTER_S.time_us
img_ego_pose: EgoPose = (
log_db.log._session.query(EgoPose).order_by(func.abs(EgoPose.timestamp - timestamp)).first()
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 1e935329..30c5cdc1 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -13,7 +13,6 @@
from pyquaternion import Quaternion
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
from d123.common.utils.dependencies import check_dependencies
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
@@ -32,6 +31,7 @@
check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
import tensorflow as tf
from waymo_open_dataset import dataset_pb2
+from waymo_open_dataset.utils import frame_utils
# TODO: Make keep_polar_features an optional argument.
# With polar features, the lidar loading time is SIGNIFICANTLY higher.
@@ -71,7 +71,7 @@
5: LiDARType.LIDAR_BACK, # REAR
}
-WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable
+WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!!
# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion)
DETECTION_ROLL_PITCH: Final[Literal["ego", "zero"]] = "zero"
@@ -382,11 +382,6 @@ def _write_recording_table(
writer.write_batch(batch)
del batch, row_data, detections_state, detections_velocity, detections_token, detections_types
- if SORT_BY_TIMESTAMP:
- recording_table = open_arrow_table(log_file_path)
- recording_table = recording_table.sort_by([("timestamp", "ascending")])
- write_arrow_table(recording_table, log_file_path)
-
def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3:
ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4)
@@ -519,7 +514,6 @@ def _extract_camera(
def _extract_lidar(
frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig
) -> Dict[LiDARType, npt.NDArray[np.float32]]:
- from waymo_open_dataset.utils import frame_utils
assert data_converter_config.lidar_store_option == "binary", "Lidar store option must be 'binary' for WOPD."
(range_images, camera_projections, _, range_image_top_pose) = parse_range_image_and_camera_projection(frame)
From 21f1dcbba6df94e7293b69776bdaed89ca364ff1 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Wed, 24 Sep 2025 10:07:21 +0800
Subject: [PATCH 045/145] add map convert, fix 0004 detection, interpolate
dynamic
---
d123/common/visualization/viser/server.py | 2 +-
.../kitti_360/kitti_360_data_converter.py | 76 ++++++--
.../kitti_360/kitti_360_helper.py | 174 +++++++++++++++---
.../kitti_360/kitti_360_map_conversion.py | 125 +++++++++++++
.../dataset_specific/kitti_360/labels.py | 15 ++
.../dataset_specific/kitti_360/load_sensor.py | 6 +-
.../kitti_360/preprocess_detection.py | 127 ++++++++++---
.../default_dataset_conversion.yaml | 2 +-
8 files changed, 450 insertions(+), 77 deletions(-)
create mode 100644 d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 16e38a66..f70aba28 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -38,7 +38,7 @@
BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines"
# Map config:
-MAP_AVAILABLE: bool = False
+MAP_AVAILABLE: bool = True
# Cameras config:
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index aee14883..4e221617 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -10,6 +10,7 @@
import numpy as np
import pickle
+import copy
from collections import defaultdict
import datetime
import hashlib
@@ -31,8 +32,9 @@
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
KITTI360_DT: Final[float] = 0.1
@@ -55,11 +57,10 @@
DIR_POSES = "data_poses"
DIR_CALIB = "calibration"
-# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
-# PATH_3D_RAW_ROOT: Path = Path("/data/jbwang/d123/data_3d_raw")
PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT: Path = KITTI360_DATA_ROOT / DIR_POSES
@@ -146,8 +147,22 @@ def get_available_splits(self) -> List[str]:
return ["kitti360"]
def convert_maps(self, worker: WorkerPool) -> None:
- logging.info("KITTI-360 does not provide standard maps. Skipping map conversion.")
- return None
+ log_args = [
+ {
+ "log_path": log_path,
+ "split": split,
+ }
+ for split, log_paths in self._log_paths_per_split.items()
+ for log_path in log_paths
+ ]
+ worker_map(
+ worker,
+ partial(
+ convert_kitti360_map_to_gpkg,
+ data_converter_config=self.data_converter_config
+ ),
+ log_args,
+ )
def convert_logs(self, worker: WorkerPool) -> None:
log_args = [
@@ -168,6 +183,20 @@ def convert_logs(self, worker: WorkerPool) -> None:
log_args,
)
+def convert_kitti360_map_to_gpkg(
+ args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig
+) -> List[Any]:
+ for log_info in args:
+ log_path: Path = log_info["log_path"]
+ split: str = log_info["split"]
+ log_name = log_path.stem
+
+ map_path = data_converter_config.output_path / "maps" / split / f"kitti360_{log_name}.gpkg"
+ if data_converter_config.force_map_conversion or not map_path.exists():
+ map_path.unlink(missing_ok=True)
+ convert_kitti360_map(log_name, map_path)
+ return []
+
def convert_kitti360_log_to_arrow(
args: List[Dict[str, Union[List[str], List[Path]]]], data_converter_config: DataConverterConfig
) -> List[Any]:
@@ -189,7 +218,7 @@ def convert_kitti360_log_to_arrow(
metadata = LogMetadata(
dataset="kitti360",
log_name=log_name,
- location=None,
+ location=log_name,
timestep_seconds=KITTI360_DT,
map_has_z=True,
)
@@ -505,26 +534,34 @@ def _extract_detections(
detections_tokens: List[List[str]] = [[] for _ in range(ts_len)]
detections_types: List[List[int]] = [[] for _ in range(ts_len)]
- bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
+ if log_name == "2013_05_28_drive_0004_sync":
+ bbox_3d_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml"
+ else:
+ bbox_3d_path = PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
if not bbox_3d_path.exists():
raise FileNotFoundError(f"BBox 3D file not found: {bbox_3d_path}")
tree = ET.parse(bbox_3d_path)
root = tree.getroot()
- dynamic_groups: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
+ dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl"
if detection_preprocess_path.exists():
with open(detection_preprocess_path, "rb") as f:
detection_preprocess_result = pickle.load(f)
- records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["records"]}
+ static_records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["static"]}
+ dynamic_records_dict = detection_preprocess_result["dynamic"]
else:
detection_preprocess_result = None
for child in root:
- semanticIdKITTI = int(child.find('semanticId').text)
- name = kittiId2label[semanticIdKITTI].name
+ if child.find('semanticId') is not None:
+ semanticIdKITTI = int(child.find('semanticId').text)
+ name = kittiId2label[semanticIdKITTI].name
+ else:
+ label = child.find('label').text
+ name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, 'unknown')
if child.find('transform') is None or name not in KIITI360_DETECTION_NAME_DICT.keys():
continue
obj = KITTI360Bbox3D()
@@ -535,7 +572,7 @@ def _extract_detections(
if detection_preprocess_result is None:
obj.filter_by_radius(ego_states_xyz,radius=50.0)
else:
- obj.load_detection_preprocess(records_dict)
+ obj.load_detection_preprocess(static_records_dict)
for record in obj.valid_frames["records"]:
frame = record["timestamp"]
detections_states[frame].append(obj.get_state_array())
@@ -543,12 +580,15 @@ def _extract_detections(
detections_tokens[frame].append(str(obj.globalID))
detections_types[frame].append(int(KIITI360_DETECTION_NAME_DICT[obj.name]))
else:
- ann_id = obj.annotationId
- dynamic_groups[ann_id].append(obj)
+ global_ID = obj.globalID
+ dynamic_objs[global_ID].append(obj)
# dynamic object
- for ann_id, obj_list in dynamic_groups.items():
- obj_list.sort(key=lambda obj: obj.timestamp)
+ if detection_preprocess_result is not None:
+ dynamic_objs = copy.deepcopy(dynamic_records_dict)
+
+ for global_id, obj_list in dynamic_objs.items():
+ obj_list = interpolate_obj_list(obj_list)
num_frames = len(obj_list)
positions = [obj.get_state_array()[:3] for obj in obj_list]
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index 77217b5d..a756a343 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -2,11 +2,13 @@
from collections import defaultdict
from typing import Dict, Optional, Any, List
+import copy
from scipy.linalg import polar
from scipy.spatial.transform import Rotation as R
from d123.geometry import BoundingBoxSE3, StateSE3
-from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label
+from d123.geometry.polyline import Polyline3D
+from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
import os
from pathlib import Path
@@ -44,6 +46,11 @@ def global2local(globalId):
return int(semanticId), int(instanceId)
class KITTI360Bbox3D():
+
+ # global id (only used for sequence 0004)
+ dynamic_global_id = 2000000
+ static_global_id = 1000000
+
# Constructor
def __init__(self):
@@ -65,37 +72,39 @@ def __init__(self):
#label
self.label = ''
-
- def parseOpencvMatrix(self, node):
- rows = int(node.find('rows').text)
- cols = int(node.find('cols').text)
- data = node.find('data').text.split(' ')
-
- mat = []
- for d in data:
- d = d.replace('\n', '')
- if len(d)<1:
- continue
- mat.append(float(d))
- mat = np.reshape(mat, [rows, cols])
- return mat
+ # used to mark if the bbox is interpolated
+ self.is_interpolated = False
+ # GT annotation idx
+ self.idx_next = -1
+ self.idx_prev = -1
+
def parseBbox(self, child):
- semanticIdKITTI = int(child.find('semanticId').text)
- self.semanticId = kittiId2label[semanticIdKITTI].id
- self.instanceId = int(child.find('instanceId').text)
- self.name = kittiId2label[semanticIdKITTI].name
-
- self.start_frame = int(child.find('start_frame').text)
- self.end_frame = int(child.find('end_frame').text)
-
self.timestamp = int(child.find('timestamp').text)
self.annotationId = int(child.find('index').text) + 1
self.label = child.find('label').text
- self.globalID = local2global(self.semanticId, self.instanceId)
+ if child.find('semanticId') is None:
+ self.name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(self.label, 'unknown')
+ self.is_dynamic = int(child.find('dynamic').text)
+ if self.is_dynamic != 0:
+ dynamicSeq = int(child.find('dynamicSeq').text)
+ self.globalID = KITTI360Bbox3D.dynamic_global_id + dynamicSeq
+ else:
+ self.globalID = KITTI360Bbox3D.static_global_id
+ KITTI360Bbox3D.static_global_id += 1
+ else:
+ self.start_frame = int(child.find('start_frame').text)
+ self.end_frame = int(child.find('end_frame').text)
+
+ semanticIdKITTI = int(child.find('semanticId').text)
+ self.semanticId = kittiId2label[semanticIdKITTI].id
+ self.instanceId = int(child.find('instanceId').text)
+ self.name = kittiId2label[semanticIdKITTI].name
+
+ self.globalID = local2global(self.semanticId, self.instanceId)
self.valid_frames = {"global_id": self.globalID, "records": []}
@@ -103,10 +112,11 @@ def parseBbox(self, child):
self.parse_scale_rotation()
def parseVertices(self, child):
- transform = self.parseOpencvMatrix(child.find('transform'))
+ transform = parseOpencvMatrix(child.find('transform'))
R = transform[:3,:3]
T = transform[:3,3]
- vertices = self.parseOpencvMatrix(child.find('vertices'))
+ vertices = parseOpencvMatrix(child.find('vertices'))
+ self.vertices_template = copy.deepcopy(vertices)
vertices = np.matmul(R, vertices.transpose()).transpose() + T
self.vertices = vertices
@@ -122,6 +132,7 @@ def parse_scale_rotation(self):
yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
self.Rm = np.array(Rm)
+ self.Sm = np.array(Sm)
self.scale = scale
self.yaw = yaw
self.pitch = pitch
@@ -153,7 +164,10 @@ def filter_by_radius(self,ego_state_xyz,radius=50.0):
def box_visible_in_point_cloud(self, points):
''' points: (N,3) , box: (8,3) '''
- box = self.vertices
+ box = self.vertices.copy()
+ # lift the box slightly so that ground points are not counted as inside
+ z_offset = 0.1
+ box[:, 2] += z_offset
O, A, B, C = box[0], box[1], box[2], box[5]
OA = A - O
OB = B - O
@@ -164,13 +178,117 @@ def box_visible_in_point_cloud(self, points):
(np.dot(O, OC) < POC) & (POC < np.dot(C, OC))
points_in_box = np.sum(mask)
- visible = True if points_in_box > 50 else False
+ visible = points_in_box > 40
return visible, points_in_box
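Note: the containment test above projects each point onto the three edge directions OA, OB, OC spanning the box from corner O (corners 0, 1, 2, 5 of the vertex array); a point is inside when each projection falls between the projections of O and of the opposite corner. A minimal sketch with an axis-aligned unit cube:

    import numpy as np

    O = np.zeros(3)
    A, B, C = np.eye(3)  # unit edges from O along x, y, z
    OA, OB, OC = A - O, B - O, C - O

    P = np.array([[0.5, 0.5, 0.5],   # inside
                  [1.5, 0.5, 0.5]])  # outside along OA
    inside = ((O @ OA < P @ OA) & (P @ OA < A @ OA) &
              (O @ OB < P @ OB) & (P @ OB < B @ OB) &
              (O @ OC < P @ OC) & (P @ OC < C @ OC))
    assert inside.tolist() == [True, False]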
def load_detection_preprocess(self, records_dict: Dict[int, Any]):
if self.globalID in records_dict:
self.valid_frames["records"] = records_dict[self.globalID]["records"]
+def interpolate_obj_list(obj_list: List[KITTI360Bbox3D]) -> List[KITTI360Bbox3D]:
+ """
+ Fill missing timestamps in obj_list by linear interpolation.
+ For each missing timestamp between two objects, create a new KITTI360Bbox3D object
+ with only interpolated position (T), yaw, pitch, roll, and copy other attributes.
+ Returns a new list with all timestamps filled and sorted.
+ """
+ if not obj_list:
+ return obj_list
+
+ # Sort by timestamp ascending
+ obj_list.sort(key=lambda obj: obj.timestamp)
+ timestamps = [obj.timestamp for obj in obj_list]
+ min_ts, max_ts = min(timestamps), max(timestamps)
+ full_ts = list(range(min_ts, max_ts + 1))
+ missing_ts = sorted(set(full_ts) - set(timestamps))
+
+ # Prepare arrays for interpolation
+ T_arr = np.array([obj.T for obj in obj_list])
+ yaw_arr = np.array([obj.yaw for obj in obj_list])
+ pitch_arr = np.array([obj.pitch for obj in obj_list])
+ roll_arr = np.array([obj.roll for obj in obj_list])
+ ts_arr = np.array(timestamps)
+
+ for ts in missing_ts:
+ idx_next = np.searchsorted(ts_arr, ts)
+ idx_prev = idx_next - 1
+ if idx_prev < 0 or idx_next >= len(obj_list):
+ continue
+
+ frac = (ts - ts_arr[idx_prev]) / (ts_arr[idx_next] - ts_arr[idx_prev])
+ T_interp = T_arr[idx_prev] * (1 - frac) + T_arr[idx_next] * frac
+
+ yaw_delta = normalize_angle(yaw_arr[idx_next] - yaw_arr[idx_prev])
+ yaw_interp = yaw_arr[idx_prev] + yaw_delta * frac
+ yaw_interp = normalize_angle(yaw_interp)
+
+ pitch_interp = pitch_arr[idx_prev] * (1 - frac) + pitch_arr[idx_next] * frac
+ roll_interp = roll_arr[idx_prev] * (1 - frac) + roll_arr[idx_next] * frac
+
+ obj_new = copy.deepcopy(obj_list[idx_prev])
+ obj_new.timestamp = ts
+ obj_new.T = T_interp
+ obj_new.yaw = yaw_interp
+ obj_new.pitch = pitch_interp
+ obj_new.roll = roll_interp
+ obj_new.Rm = R.from_euler('zyx', [obj_new.yaw, obj_new.pitch, obj_new.roll], degrees=False).as_matrix()
+ obj_new.R = obj_new.Rm @ obj_new.Sm
+ obj_new.vertices = (obj_new.R @ obj_new.vertices_template.T).T + obj_new.T
+ obj_new.is_interpolated = True
+ obj_new.idx_prev = ts_arr[idx_prev]
+ obj_new.idx_next = ts_arr[idx_next]
+
+ obj_list.append(obj_new)
+
+ obj_list.sort(key=lambda obj: obj.timestamp)
+ return obj_list
+
+def normalize_angle(a):
+ return np.arctan2(np.sin(a), np.cos(a))
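Note: normalize_angle wraps any angle into (-pi, pi]; interpolating through the wrapped delta (rather than averaging the raw endpoint values) is what keeps yaw correct across the +/-pi seam. A small self-contained sketch:

    import numpy as np

    def normalize_angle(a):
        return np.arctan2(np.sin(a), np.cos(a))

    yaw_prev, yaw_next, frac = 3.1, -3.1, 0.5
    delta = normalize_angle(yaw_next - yaw_prev)        # ~ +0.083, not -6.2
    yaw_mid = normalize_angle(yaw_prev + delta * frac)
    # Naive averaging would give 0.0 (facing backwards); the wrapped midpoint stays near pi.
    assert abs(abs(yaw_mid) - np.pi) < 1e-5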
+
+class KITTI360_MAP_Bbox3D():
+ def __init__(self):
+ self.id = -1
+ self.label = ' '
+
+ self.vertices: Polyline3D = None
+ self.R = None
+ self.T = None
+
+ def parseVertices_plane(self, child):
+ transform = parseOpencvMatrix(child.find('transform'))
+ R = transform[:3,:3]
+ T = transform[:3,3]
+ if child.find("transform_plane").find('rows').text == '0':
+ vertices = parseOpencvMatrix(child.find('vertices'))
+ else:
+ vertices = parseOpencvMatrix(child.find('vertices_plane'))
+
+ vertices = np.matmul(R, vertices.transpose()).transpose() + T
+ self.vertices = Polyline3D.from_array(vertices)
+
+ self.R = R
+ self.T = T
+
+ def parseBbox(self, child):
+ self.id = int(child.find('index').text)
+ self.label = child.find('label').text
+ self.parseVertices_plane(child)
+
+
+def parseOpencvMatrix(node):
+ rows = int(node.find('rows').text)
+ cols = int(node.find('cols').text)
+ data = node.find('data').text.split(' ')
+
+ mat = []
+ for d in data:
+ d = d.replace('\n', '')
+ if len(d)<1:
+ continue
+ mat.append(float(d))
+ mat = np.reshape(mat, [rows, cols])
+ return mat
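Note: parseOpencvMatrix reads the <rows>/<cols>/<data> layout that OpenCV's XML storage uses in the KITTI-360 annotation files. A minimal usage sketch with a hand-written node (assuming parseOpencvMatrix and numpy are in scope as above):

    import xml.etree.ElementTree as ET

    node = ET.fromstring(
        "<transform><rows>2</rows><cols>2</cols>"
        "<data>1.0 0.0 \n0.0 1.0</data></transform>"
    )
    mat = parseOpencvMatrix(node)
    assert mat.shape == (2, 2) and mat[0, 0] == 1.0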
def get_lidar_extrinsic() -> np.ndarray:
cam2pose_txt = PATH_CALIB_ROOT / "calib_cam_to_pose.txt"
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
new file mode 100644
index 00000000..bf13eda6
--- /dev/null
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
@@ -0,0 +1,125 @@
+import os
+import warnings
+from pathlib import Path
+from typing import Dict, List, Optional
+
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+import xml.etree.ElementTree as ET
+import pyogrio
+from shapely.geometry import LineString
+import shapely.geometry as geom
+
+from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import (
+ get_road_edge_linear_rings,
+ split_line_geometry_by_max_length,
+)
+from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value
+from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.geometry.polyline import Polyline3D
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D
+
+MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges
+
+KITTI360_DATA_ROOT = Path(os.environ["KITTI360_DATA_ROOT"])
+
+DIR_3D_BBOX = "data_3d_bboxes"
+
+PATH_3D_BBOX_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_BBOX
+
+KIITI360_MAP_BBOX = [
+ "road",
+ "sidewalk",
+ # "railtrack",
+ # "ground",
+ # "driveway",
+]
+
+def convert_kitti360_map(log_name: str, map_path: Path) -> None:
+
+ xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml"
+
+ if not xml_path.exists():
+ raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
+
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ objs: List[KITTI360_MAP_Bbox3D] = []
+ for child in root:
+ label = child.find('label').text
+ if child.find("transform") is None or label not in KIITI360_MAP_BBOX:
+ continue
+ obj = KITTI360_MAP_Bbox3D()
+ obj.parseBbox(child)
+ objs.append(obj)
+
+ dataframes: Dict[MapLayer, gpd.GeoDataFrame] = {}
+ dataframes[MapLayer.LANE] = _get_none_data()
+ dataframes[MapLayer.LANE_GROUP] = _get_none_data()
+ dataframes[MapLayer.INTERSECTION] = _get_none_data()
+ dataframes[MapLayer.CROSSWALK] = _get_none_data()
+ dataframes[MapLayer.WALKWAY] = _extract_walkway_df(objs)
+ dataframes[MapLayer.CARPARK] = _get_none_data()
+ dataframes[MapLayer.GENERIC_DRIVABLE] = _extract_generic_drivable_df(objs)
+ dataframes[MapLayer.ROAD_EDGE] = _extract_road_edge_df(objs)
+ dataframes[MapLayer.ROAD_LINE] = _get_none_data()
+
+ for layer, gdf in dataframes.items():
+ gdf.to_file(map_path, layer=layer.serialize(), driver="GPKG", mode="a")
+
+def _get_none_data() -> gpd.GeoDataFrame:
+ ids = []
+ geometries = []
+ data = pd.DataFrame({"id": ids})
+ gdf = gpd.GeoDataFrame(data, geometry=geometries)
+ return gdf
+
+def _extract_generic_drivable_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame:
+ ids: List[int] = []
+ outlines: List[geom.LineString] = []
+ geometries: List[geom.Polygon] = []
+ for obj in objs:
+ if obj.label != "road":
+ continue
+ ids.append(obj.id)
+ outlines.append(obj.vertices.linestring)
+ geometries.append(geom.Polygon(obj.vertices.array[:, :2]))
+ data = pd.DataFrame({"id": ids, "outline": outlines})
+ gdf = gpd.GeoDataFrame(data, geometry=geometries)
+ return gdf
+
+def _extract_walkway_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame:
+ ids: List[int] = []
+ outlines: List[geom.LineString] = []
+ geometries: List[geom.Polygon] = []
+ for obj in objs:
+ if obj.label != "sidewalk":
+ continue
+ ids.append(obj.id)
+ outlines.append(obj.vertices.linestring)
+ geometries.append(geom.Polygon(obj.vertices.array[:, :2]))
+
+ data = pd.DataFrame({"id": ids, "outline": outlines})
+ gdf = gpd.GeoDataFrame(data, geometry=geometries)
+ return gdf
+
+def _extract_road_edge_df(objs: list[KITTI360_MAP_Bbox3D]) -> gpd.GeoDataFrame:
+ geometries: List[geom.Polygon] = []
+ for obj in objs:
+ if obj.label != "road":
+ continue
+ geometries.append(geom.Polygon(obj.vertices.array[:, :2]))
+ road_edge_linear_rings = get_road_edge_linear_rings(geometries)
+ road_edges = split_line_geometry_by_max_length(road_edge_linear_rings, MAX_ROAD_EDGE_LENGTH)
+
+ ids = []
+ road_edge_types = []
+ for idx in range(len(road_edges)):
+ ids.append(idx)
+ # TODO @DanielDauner: Figure out if other types should/could be assigned here.
+ road_edge_types.append(int(RoadEdgeType.ROAD_EDGE_BOUNDARY))
+
+ data = pd.DataFrame({"id": ids, "road_edge_type": road_edge_types})
+ return gpd.GeoDataFrame(data, geometry=road_edges)
\ No newline at end of file
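Note: the converter writes every MapLayer as a named layer of a single GeoPackage. A hedged read-back sketch; the path and the layer string are illustrative, since the actual layer names come from MapLayer.serialize() and may differ:

    import geopandas as gpd
    import pyogrio

    map_path = "maps/kitti360/kitti360_2013_05_28_drive_0000_sync.gpkg"  # hypothetical output path
    print(pyogrio.list_layers(map_path))                   # enumerate the layers actually written
    walkways = gpd.read_file(map_path, layer="walkway")    # assumed name for MapLayer.WALKWAY
    print(len(walkways), walkways.geometry.geom_type.unique())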
diff --git a/d123/dataset/dataset_specific/kitti_360/labels.py b/d123/dataset/dataset_specific/kitti_360/labels.py
index de24f152..6903be9f 100644
--- a/d123/dataset/dataset_specific/kitti_360/labels.py
+++ b/d123/dataset/dataset_specific/kitti_360/labels.py
@@ -169,6 +169,21 @@ def assureSingleInstanceName( name ):
from d123.common.datatypes.detection.detection_types import DetectionType
+BBOX_LABLES_TO_DETECTION_NAME_DICT = {
+ "car": "car",
+ "truck": "truck",
+ "bicycle": "bicycle",
+ "trafficLight": "traffic light",
+ "trailer": "trailer",
+ "bus": "bus",
+ "pedestrian": "person",
+ "motorcycle": "motorcycle",
+ "stop": "stop",
+ "trafficSign": "traffic sign",
+ "rider": "rider",
+ "caravan": "caravan",
+}
+
KIITI360_DETECTION_NAME_DICT = {
"traffic light": DetectionType.SIGN,
"traffic sign": DetectionType.SIGN,
diff --git a/d123/dataset/dataset_specific/kitti_360/load_sensor.py b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
index 2a23401f..c4df6d36 100644
--- a/d123/dataset/dataset_specific/kitti_360/load_sensor.py
+++ b/d123/dataset/dataset_specific/kitti_360/load_sensor.py
@@ -1,12 +1,16 @@
from pathlib import Path
import numpy as np
+import logging
from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
- assert filepath.exists(), f"LiDAR file not found: {filepath}"
+ if not filepath.exists():
+ logging.warning(f"LiDAR file does not exist: {filepath}. Returning empty point cloud.")
+ return LiDAR(metadata=lidar_metadata, point_cloud=np.zeros((4, 0), dtype=np.float32))
+
pcd = np.fromfile(filepath, dtype=np.float32)
pcd = np.reshape(pcd,[-1,4]) # [N,4]
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
index 5827e779..f2d14ce1 100644
--- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -1,8 +1,8 @@
"""
-This script precomputes static detection records for KITTI-360:
+This script precomputes detection records for KITTI-360:
- Stage 1: radius filtering using ego positions (from poses.txt).
- Stage 2: LiDAR visibility check to fill per-frame point counts.
-It writes a pickle containing, for each static object, all feasible frames and
+It writes a pickle containing, for each object, all feasible frames and
their point counts to avoid recomputation in later pipelines.
We have precomputed and saved the pickle for all training logs; you can either
download them or run this script to generate them.
@@ -12,9 +12,11 @@
import os
import pickle
import logging
+import copy
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Any
from collections import defaultdict
+import concurrent.futures
import numpy as np
import numpy.typing as npt
@@ -25,15 +27,16 @@
DIR_3D_BBOX = "data_3d_bboxes"
DIR_POSES = "data_poses"
-# PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
-PATH_3D_RAW_ROOT = Path("/data/jbwang/d123/data_3d_raw/")
+PATH_3D_RAW_ROOT = KITTI360_DATA_ROOT / DIR_3D_RAW
PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
def _bbox_xml_path(log_name: str) -> Path:
+ if log_name == "2013_05_28_drive_0004_sync":
+ return PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml"
return PATH_3D_BBOX_ROOT / "train" / f"{log_name}.xml"
def _lidar_frame_path(log_name: str, frame_idx: int) -> Path:
@@ -44,24 +47,36 @@ def _load_lidar_xyz(filepath: Path) -> np.ndarray:
arr = np.fromfile(filepath, dtype=np.float32)
return arr.reshape(-1, 4)[:, :3]
-def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]:
- """Parse XML and collect static objects with valid class names."""
+def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, List[KITTI360Bbox3D]]]:
+ """Parse XML and collect objects with valid class names."""
xml_path = _bbox_xml_path(log_name)
if not xml_path.exists():
raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
tree = ET.parse(xml_path)
root = tree.getroot()
- objs: List[KITTI360Bbox3D] = []
+
+ static_objs: List[KITTI360Bbox3D] = []
+ dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
+
for child in root:
- sem_id = int(child.find("semanticId").text)
- name = kittiId2label[sem_id].name
- timestamp = int(child.find('timestamp').text) # -1 for static objects
- if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1:
+ if child.find('semanticId') is not None:
+ semanticIdKITTI = int(child.find('semanticId').text)
+ name = kittiId2label[semanticIdKITTI].name
+ else:
+ label = child.find('label').text
+ name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(label, 'unknown')
+ if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT:
continue
obj = KITTI360Bbox3D()
obj.parseBbox(child)
- objs.append(obj)
- return objs
+ timestamp = int(child.find('timestamp').text)
+ if timestamp == -1:
+ static_objs.append(obj)
+ else:
+ global_ID = obj.globalID
+ dynamic_objs[global_ID].append(obj)
+
+ return static_objs, dynamic_objs
def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
"""Load ego states from poses.txt."""
@@ -105,14 +120,18 @@ def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
def process_detection(
log_name: str,
- radius_m: float = 50.0,
+ radius_m: float = 60.0,
output_dir: Optional[Path] = None,
) -> None:
"""
- Precompute static detections filtering:
+ Precompute detection filtering.
+ For static objects:
1) filter by ego-centered radius over all frames
2) filter by LiDAR point cloud visibility
- Save per-frame static detections to a pickle to avoid recomputation.
+ For dynamic objects:
+ 1) interpolate boxes for missing frames
+ 2) select the box with the highest LiDAR point count
+ Save per-frame detections to a pickle to avoid recomputation.
"""
lidar_dir = PATH_3D_RAW_ROOT / log_name / "velodyne_points" / "data"
@@ -121,21 +140,36 @@ def process_detection(
ts_len = len(list(lidar_dir.glob("*.bin")))
logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames")
- # 1) Parse static objects from XML
- static_objs = _collect_static_objects(log_name)
+ # 1) Parse objects from XML
+ static_objs: List[KITTI360Bbox3D]
+ dynamic_objs: Dict[int, List[KITTI360Bbox3D]]
+ static_objs, dynamic_objs = _collect_objects(log_name)
+
+ # only interpolate dynamic objects
+ for global_ID, obj_list in dynamic_objs.items():
+ obj_list_interpolated = interpolate_obj_list(obj_list)
+ dynamic_objs[global_ID] = obj_list_interpolated
+ dynamic_objs_updated = copy.deepcopy(dynamic_objs)
+
logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}")
+ logging.info(f"[preprocess] {log_name}: dynamic objects = {len(dynamic_objs.keys())}")
- # 2) Filter by ego-centered radius
+ # 2) Filter static objs by ego-centered radius
ego_states = _collect_ego_states(log_name,ts_len)
logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}")
for obj in static_objs:
obj.filter_by_radius(ego_states[:, :3, 3], radius_m)
- # 3) Filter by LiDAR point cloud visibility
+ # 3) Filter static objs by LiDAR point cloud visibility
lidar_extrinsic = get_lidar_extrinsic()
- for time_idx in range(ts_len):
+
+ def process_one_frame(time_idx: int) -> None:
logging.info(f"[preprocess] {log_name}: t={time_idx}")
lidar_path = _lidar_frame_path(log_name, time_idx)
+ if not lidar_path.exists():
+ logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}")
+ return
+
lidar_xyz = _load_lidar_xyz(lidar_path)
# lidar to pose
@@ -158,17 +192,53 @@ def process_detection(
record["points_in_box"] = points_in_box
break
+ # for dynamic objects, select the box with the highest LiDAR point count
+ for global_ID, obj_list in dynamic_objs.items():
+ obj_at_time = [obj for obj in obj_list if obj.timestamp == time_idx]
+ if not obj_at_time:
+ continue
+
+ obj = obj_at_time[0]
+ # NOTE only update interpolated boxes
+ if not obj.is_interpolated:
+ continue
+
+ max_points = -1
+ best_obj = None
+ ts_prev = obj.idx_prev
+ ts_next = obj.idx_next
+ candidates = [candidate for candidate in obj_list if ts_prev <= candidate.timestamp <= ts_next]
+
+ for candidate in candidates:
+ _, points_in_box = candidate.box_visible_in_point_cloud(lidar_in_world)
+ if points_in_box > max_points:
+ max_points = points_in_box
+ best_obj = candidate
+
+ if best_obj is not None:
+ idx = next((i for i, o in enumerate(dynamic_objs[global_ID]) if o.timestamp == time_idx), None)
+ if idx is not None:
+ dynamic_objs_updated[global_ID][idx] = copy.deepcopy(best_obj)
+ dynamic_objs_updated[global_ID][idx].timestamp = time_idx
+
+ max_workers = (os.cpu_count() or 1) * 2
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+ list(executor.map(process_one_frame, range(ts_len))) # drain the iterator; frames are processed for their side effects
+
# 4) Save pickle
- records: List[Dict[str, Any]] = []
+ static_records: List[Dict[str, Any]] = []
for obj in static_objs:
- records.append(obj.valid_frames)
+ static_records.append(obj.valid_frames)
+
if output_dir is None:
output_dir = PATH_3D_BBOX_ROOT / "preprocess"
output_dir.mkdir(parents=True, exist_ok=True)
out_path = output_dir / f"{log_name}_detection_preprocessed.pkl"
+
payload = {
"log_name": log_name,
- "records": records
+ "static": static_records,
+ "dynamic": dymanic_objs_updated
}
with open(out_path, "wb") as f:
pickle.dump(payload, f)
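Note: for downstream consumers, the pickle written here has a small, stable shape. A sketch of loading it (the path is illustrative; unpickling the dynamic entries requires the KITTI360Bbox3D class to be importable):

    import pickle
    from pathlib import Path

    pkl = Path("detection_preprocess/2013_05_28_drive_0004_sync_detection_preprocessed.pkl")
    with open(pkl, "rb") as f:
        payload = pickle.load(f)

    # "static":  list of {"global_id": ..., "records": [{"timestamp": ..., "points_in_box": ...}, ...]}
    # "dynamic": dict mapping global_id -> list of KITTI360Bbox3D (interpolated frames included)
    print(payload["log_name"], len(payload["static"]), len(payload["dynamic"]))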
@@ -177,11 +247,12 @@ def process_detection(
if __name__ == "__main__":
import argparse
logging.basicConfig(level=logging.INFO)
- parser = argparse.ArgumentParser(description="Precompute KITTI-360 static detections filters")
- parser.add_argument("--log_name", default="2013_05_28_drive_0007_sync")
+ parser = argparse.ArgumentParser(description="Precompute KITTI-360 detection filters")
+ parser.add_argument("--log_name", default="2013_05_28_drive_0004_sync")
parser.add_argument("--radius", type=float, default=60.0)
parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl")
args = parser.parse_args()
+
process_detection(
log_name=args.log_name,
radius_m=args.radius,
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 52915f13..2c474fe8 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -22,4 +22,4 @@ defaults:
- kitti360_dataset
force_log_conversion: True
-force_map_conversion: False
+force_map_conversion: True
From 49fa542fde116d497956cfe597efa466668e4f92 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 24 Sep 2025 09:18:58 +0200
Subject: [PATCH 046/145] Further removal of EulerStateSE3 (#43)
---
d123/datasets/wopd/wopd_data_converter.py | 57 ++++++++++-------------
d123/geometry/se.py | 29 ++++++++----
2 files changed, 45 insertions(+), 41 deletions(-)
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 30c5cdc1..f3353a6a 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -5,12 +5,11 @@
from dataclasses import asdict
from functools import partial
from pathlib import Path
-from typing import Any, Dict, Final, List, Literal, Tuple, Union
+from typing import Any, Dict, Final, List, Tuple, Union
import numpy as np
import numpy.typing as npt
import pyarrow as pa
-from pyquaternion import Quaternion
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.dependencies import check_dependencies
@@ -24,8 +23,8 @@
from d123.datatypes.sensors.lidar_index import WopdLidarIndex
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
-from d123.geometry import BoundingBoxSE3Index, EulerStateSE3, Point3D, Vector3D, Vector3DIndex
-from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_euler_se3_array
+from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
@@ -74,7 +73,7 @@
WOPD_DATA_ROOT = Path("/media/nvme1/waymo_perception") # TODO: set as environment variable !!!!
# Whether to use ego or zero roll and pitch values for bounding box detections (after global conversion)
-DETECTION_ROLL_PITCH: Final[Literal["ego", "zero"]] = "zero"
+ZERO_ROLL_PITCH: Final[bool] = True
def create_token(input_data: str) -> str:
@@ -383,16 +382,12 @@ def _write_recording_table(
del batch, row_data, detections_state, detections_velocity, detections_token, detections_types
-def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> EulerStateSE3:
- ego_pose_matrix = np.array(frame.pose.transform).reshape(4, 4)
- yaw, pitch, roll = Quaternion(matrix=ego_pose_matrix[:3, :3]).yaw_pitch_roll
- ego_point_3d = Point3D.from_array(ego_pose_matrix[:3, 3])
-
- return EulerStateSE3(x=ego_point_3d.x, y=ego_point_3d.y, z=ego_point_3d.z, roll=roll, pitch=pitch, yaw=yaw)
+def _get_ego_pose_se3(frame: dataset_pb2.Frame) -> StateSE3:
+ ego_pose_matrix = np.array(frame.pose.transform, dtype=np.float64).reshape(4, 4)
+ return StateSE3.from_transformation_matrix(ego_pose_matrix)
def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:
- # TODO: implement
ego_rear_axle = _get_ego_pose_se3(frame)
@@ -406,13 +401,22 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
if detection.type not in WOPD_DETECTION_NAME_DICT:
continue
- # 1. SS3 Bounding Box
+ # 1. Quaternion rotations
+ # NOTE: WOPD bounding boxes are (1) stored in the ego frame and (2) only supply a yaw rotation.
+ # The global pose can either keep the ego roll and pitch or set them to zero
+ # (zero global roll/pitch corresponds to using the ego roll/pitch before the transformation to the global frame).
+ detection_quaternion = EulerAngles(
+ roll=ego_rear_axle.roll if ZERO_ROLL_PITCH else DEFAULT_ROLL,
+ pitch=ego_rear_axle.pitch if ZERO_ROLL_PITCH else DEFAULT_PITCH,
+ yaw=detection.box.heading,
+ ).quaternion
+
+ # 2. Fill SE3 Bounding Box
detections_state[detection_idx, BoundingBoxSE3Index.X] = detection.box.center_x
detections_state[detection_idx, BoundingBoxSE3Index.Y] = detection.box.center_y
detections_state[detection_idx, BoundingBoxSE3Index.Z] = detection.box.center_z
- detections_state[detection_idx, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL # not provided in WOPD
- detections_state[detection_idx, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH # not provided in WOPD
- detections_state[detection_idx, BoundingBoxSE3Index.YAW] = detection.box.heading
+ detections_state[detection_idx, BoundingBoxSE3Index.QUATERNION] = detection_quaternion
detections_state[detection_idx, BoundingBoxSE3Index.LENGTH] = detection.box.length
detections_state[detection_idx, BoundingBoxSE3Index.WIDTH] = detection.box.width
detections_state[detection_idx, BoundingBoxSE3Index.HEIGHT] = detection.box.height
@@ -428,17 +432,9 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
detections_token.append(str(detection.id))
detections_types.append(int(WOPD_DETECTION_NAME_DICT[detection.type]))
- detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_euler_se3_array(
+ detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
)
- if DETECTION_ROLL_PITCH == "ego":
- pass
- if DETECTION_ROLL_PITCH == "zero":
- detections_state[:, BoundingBoxSE3Index.ROLL] = DEFAULT_ROLL
- detections_state[:, BoundingBoxSE3Index.PITCH] = DEFAULT_PITCH
- else:
- raise ValueError(f"Invalid DETECTION_ROLL_PITCH value: {DETECTION_ROLL_PITCH}. Must be 'ego' or 'zero'.")
-
return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types
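Note: the yaw-only WOPD headings are lifted to full quaternions before the batched ego-to-global transform. A sketch of the yaw-to-quaternion step, using scipy as a stand-in for the EulerAngles helper (zyx order matches the yaw/pitch/roll convention used in this diff):

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    heading = np.deg2rad(30.0)                             # box yaw in the ego frame
    q_xyzw = R.from_euler("zyx", [heading, 0.0, 0.0]).as_quat()
    q_wxyz = np.roll(q_xyzw, 1)                            # reorder to (qw, qx, qy, qz)
    assert np.isclose(q_wxyz[0], np.cos(heading / 2))      # yaw-only: qw = cos(yaw / 2)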
@@ -484,17 +480,12 @@ def _extract_camera(
transform = np.array(calibration.extrinsic.transform).reshape(4, 4)
# FIXME: This is an ugly hack to convert to uniform camera convention.
- flip_camera = EulerStateSE3(
- x=0.0,
- y=0.0,
- z=0.0,
- roll=np.deg2rad(0.0),
- pitch=np.deg2rad(90.0),
- yaw=np.deg2rad(-90.0),
- ).rotation_matrix
+ # TODO: Extract function to convert between different camera conventions.
+ flip_camera = EulerAngles(roll=np.deg2rad(0.0), pitch=np.deg2rad(90.0), yaw=np.deg2rad(-90.0)).rotation_matrix
transform[:3, :3] = transform[:3, :3] @ flip_camera
context_extrinsic[camera_type] = transform
+ # TODO: Refactor to avoid code duplication
for image_proto in frame.images:
camera_type = WOPD_CAMERA_TYPES[image_proto.name]
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index a35b9c4c..213dca2f 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -139,6 +139,20 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> StateS
object.__setattr__(instance, "_array", array.copy() if copy else array)
return instance
+ @classmethod
+ def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float64]) -> StateSE3:
+ """Constructs a StateSE3 from a 4x4 transformation matrix.
+
+ :param transformation_matrix: A 4x4 numpy array representing the transformation matrix.
+ :return: A StateSE3 instance.
+ """
+ assert transformation_matrix.ndim == 2
+ assert transformation_matrix.shape == (4, 4)
+ array = np.zeros(len(StateSE3Index), dtype=np.float64)
+ array[StateSE3Index.XYZ] = transformation_matrix[:3, 3] # translation component
+ array[StateSE3Index.QUATERNION] = Quaternion.from_rotation_matrix(transformation_matrix[:3, :3])
+ return StateSE3.from_array(array)
+
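Note: from_transformation_matrix and the transformation_matrix property added earlier in this series should be mutual inverses. A round-trip sketch in plain numpy/scipy, standing in for the StateSE3 API:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    T = np.eye(4)
    T[:3, :3] = R.from_euler("z", 0.3).as_matrix()
    T[:3, 3] = [1.0, -2.0, 0.5]

    # Decompose (as in from_transformation_matrix) ...
    translation, q_xyzw = T[:3, 3].copy(), R.from_matrix(T[:3, :3]).as_quat()

    # ... then recompose (as in the transformation_matrix property):
    T2 = np.eye(4)
    T2[:3, :3] = R.from_quat(q_xyzw).as_matrix()
    T2[:3, 3] = translation
    assert np.allclose(T, T2)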
@property
def x(self) -> float:
"""Returns the x-coordinate of the quaternion.
@@ -332,18 +346,17 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> EulerS
return instance
@classmethod
- def from_transformation_matrix(cls, array: npt.NDArray[np.float64]) -> EulerStateSE3:
- """Constructs a StateSE3 from a 4x4 transformation matrix.
+ def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float64]) -> EulerStateSE3:
+ """Constructs a EulerStateSE3 from a 4x4 transformation matrix.
:param array: A 4x4 numpy array representing the transformation matrix.
- :return: A StateSE3 instance.
+ :return: A EulerStateSE3 instance.
"""
- assert array.ndim == 2
- assert array.shape == (4, 4)
- translation = array[:3, 3]
- rotation = array[:3, :3]
+ assert transformation_matrix.ndim == 2
+ assert transformation_matrix.shape == (4, 4)
+ translation = transformation_matrix[:3, 3]
+ rotation = transformation_matrix[:3, :3]
roll, pitch, yaw = EulerAngles.from_rotation_matrix(rotation)
-
return EulerStateSE3(
x=translation[Point3DIndex.X],
y=translation[Point3DIndex.Y],
From cdc5cd8be46a298c8312c8d9d69c6f4385317400 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 24 Sep 2025 09:43:50 +0200
Subject: [PATCH 047/145] Config adaptations and visualizations to account for
recent refactoring (#39)
---
.../visualization/matplotlib/observation.py | 14 ++++++------
d123/datasets/wopd/wopd_data_converter.py | 2 +-
.../default_dataset_conversion.yaml | 4 ++--
.../config/datasets/av2_sensor_dataset.yaml | 4 ++--
.../script/config/datasets/carla_dataset.yaml | 4 ++--
.../config/datasets/nuplan_dataset.yaml | 4 ++--
.../config/datasets/nuplan_mini_dataset.yaml | 4 ++--
.../datasets/nuplan_private_dataset.yaml | 4 ++--
d123/script/config/datasets/wopd_dataset.yaml | 4 ++--
notebooks/av2/delete_me.ipynb | 10 ++++-----
notebooks/av2/delete_me_map.ipynb | 2 +-
notebooks/deprecated/extraction_testing.ipynb | 2 +-
.../test_nuplan_conversion.ipynb | 2 +-
notebooks/deprecated/test_waypoints.ipynb | 2 +-
notebooks/gym/test_gym.ipynb | 2 +-
notebooks/nuplan/nuplan_sensor_loading.ipynb | 2 +-
notebooks/scene_rendering.ipynb | 4 ++--
notebooks/viz/bev_matplotlib.ipynb | 22 ++++++++++---------
notebooks/waymo_perception/map_testing.ipynb | 2 +-
19 files changed, 48 insertions(+), 46 deletions(-)
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index eb37ad74..ed46c6a9 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -4,9 +4,6 @@
import numpy as np
import shapely.geometry as geom
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
from d123.common.visualization.color.config import PlotConfig
from d123.common.visualization.color.default import (
BOX_DETECTION_CONFIG,
@@ -22,10 +19,13 @@
get_pose_triangle,
shapely_geometry_local_coords,
)
-from d123.datasets.maps.abstract_map import AbstractMap
-from d123.datasets.maps.abstract_map_objects import AbstractLane
-from d123.datasets.maps.map_datatypes import MapLayer
-from d123.datasets.scene.abstract_scene import AbstractScene
+from d123.datatypes.detections.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.maps.abstract_map import AbstractMap
+from d123.datatypes.maps.abstract_map_objects import AbstractLane
+from d123.datatypes.maps.map_datatypes import MapLayer
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
from d123.geometry.geometry_index import StateSE2Index
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index f3353a6a..95649218 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -10,13 +10,13 @@
import numpy as np
import numpy.typing as npt
import pyarrow as pa
+from d123.datatypes.detections.detection_types import DetectionType
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.dependencies import check_dependencies
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
-from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index b46e7c9d..be97439e 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,9 +15,9 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
- # - nuplan_private_dataset
+ - nuplan_private_dataset
# - carla_dataset
- - wopd_dataset
+ # - wopd_dataset
# - av2_sensor_dataset
force_log_conversion: True
diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml
index 58a64f7a..d567a175 100644
--- a/d123/script/config/datasets/av2_sensor_dataset.yaml
+++ b/d123/script/config/datasets/av2_sensor_dataset.yaml
@@ -1,12 +1,12 @@
av2_sensor_dataset:
- _target_: d123.dataset.dataset_specific.av2.av2_data_converter.AV2SensorDataConverter
+ _target_: d123.datasets.av2.av2_data_converter.AV2SensorDataConverter
_convert_: 'all'
splits: ["av2-sensor-mini_train"]
log_path: "/mnt/elements_0/argoverse"
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/carla_dataset.yaml b/d123/script/config/datasets/carla_dataset.yaml
index 31184f78..14160a0c 100644
--- a/d123/script/config/datasets/carla_dataset.yaml
+++ b/d123/script/config/datasets/carla_dataset.yaml
@@ -1,12 +1,12 @@
carla_dataset:
- _target_: d123.dataset.dataset_specific.carla.carla_data_converter.CarlaDataConverter
+ _target_: d123.datasets.carla.carla_data_converter.CarlaDataConverter
_convert_: 'all'
splits: ["carla"]
log_path: "${oc.env:HOME}/carla_workspace/data"
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/nuplan_dataset.yaml b/d123/script/config/datasets/nuplan_dataset.yaml
index 8c104287..6bcca6fb 100644
--- a/d123/script/config/datasets/nuplan_dataset.yaml
+++ b/d123/script/config/datasets/nuplan_dataset.yaml
@@ -1,12 +1,12 @@
nuplan_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter
_convert_: 'all'
splits: ["nuplan_train", "nuplan_val", "nuplan_test"]
log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/nuplan_mini_dataset.yaml b/d123/script/config/datasets/nuplan_mini_dataset.yaml
index 1fdb2b54..a371d34d 100644
--- a/d123/script/config/datasets/nuplan_mini_dataset.yaml
+++ b/d123/script/config/datasets/nuplan_mini_dataset.yaml
@@ -1,5 +1,5 @@
nuplan_mini_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter
_convert_: 'all'
@@ -7,7 +7,7 @@ nuplan_mini_dataset:
log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml
index 399dcb7e..7062f38f 100644
--- a/d123/script/config/datasets/nuplan_private_dataset.yaml
+++ b/d123/script/config/datasets/nuplan_private_dataset.yaml
@@ -1,12 +1,12 @@
nuplan_private_dataset:
- _target_: d123.dataset.dataset_specific.nuplan.nuplan_data_converter.NuplanDataConverter
+ _target_: d123.datasets.nuplan.nuplan_data_converter.NuplanDataConverter
_convert_: 'all'
splits: ["nuplan_private_test"]
log_path: ${oc.env:NUPLAN_DATA_ROOT}/nuplan-v1.1/splits # NOTE: folder including [mini, trainval, test], sometimes not inside "splits" folder
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/d123/script/config/datasets/wopd_dataset.yaml b/d123/script/config/datasets/wopd_dataset.yaml
index 1abb0381..906aef03 100644
--- a/d123/script/config/datasets/wopd_dataset.yaml
+++ b/d123/script/config/datasets/wopd_dataset.yaml
@@ -1,12 +1,12 @@
wopd_dataset:
- _target_: d123.dataset.dataset_specific.wopd.wopd_data_converter.WOPDDataConverter
+ _target_: d123.datasets.wopd.wopd_data_converter.WOPDDataConverter
_convert_: 'all'
splits: ["wopd_train"]
log_path: null # TODO: implement
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
diff --git a/notebooks/av2/delete_me.ipynb b/notebooks/av2/delete_me.ipynb
index 434287f0..8d224181 100644
--- a/notebooks/av2/delete_me.ipynb
+++ b/notebooks/av2/delete_me.ipynb
@@ -126,8 +126,8 @@
"# # # 4. sensors\n",
"# # print(_ls(log_folder))\n",
"\n",
- "# # from d123.dataset.dataset_specific.av2.av2_data_converter import AV2SensorDataConverter\n",
- "# from d123.dataset.dataset_specific.av2.av2_data_converter import AV2SensorDataConverter\n",
+ "# # from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n",
+ "# from d123.datasets.av2.av2_data_converter import AV2SensorDataConverter\n",
"\n",
"# # AV2SensorDataConverter([])"
]
@@ -197,8 +197,8 @@
"# Testing sensor syn dataframes\n",
"\n",
"from typing import Optional\n",
- "from d123.dataset.dataset_specific.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n",
- "from d123.dataset.dataset_specific.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n",
+ "from d123.datasets.av2.av2_constants import AV2_CAMERA_TYPE_MAPPING\n",
+ "from d123.datasets.av2.av2_helper import build_sensor_dataframe, build_synchronization_dataframe\n",
"\n",
"\n",
"sensor_df = build_sensor_dataframe(log_folder)\n",
@@ -384,7 +384,7 @@
"\n",
"\n",
"from pyquaternion import Quaternion\n",
- "from d123.common.datatypes.detection.detection_types import DetectionType\n",
+ "d123.datatypes.detections.detection_types import DetectionType\n",
"from d123.geometry.base import StateSE2\n",
"from d123.geometry.bounding_box import BoundingBoxSE2\n",
"from d123.common.visualization.color.config import PlotConfig\n",
diff --git a/notebooks/av2/delete_me_map.ipynb b/notebooks/av2/delete_me_map.ipynb
index 901c232c..2e8dba42 100644
--- a/notebooks/av2/delete_me_map.ipynb
+++ b/notebooks/av2/delete_me_map.ipynb
@@ -59,7 +59,7 @@
"from typing import Dict, List\n",
"\n",
"from d123.geometry.line.polylines import Polyline3D\n",
- "from d123.dataset.dataset_specific.av2.av2_map_conversion import _extract_lane_group_dict\n",
+ "from d123.datasets.av2.av2_map_conversion import _extract_lane_group_dict\n",
"\n",
"\n",
"def _extract_polyline(data: List[Dict[str, float]], close: bool = False) -> Polyline3D:\n",
diff --git a/notebooks/deprecated/extraction_testing.ipynb b/notebooks/deprecated/extraction_testing.ipynb
index d782d7ba..b4c2d5bb 100644
--- a/notebooks/deprecated/extraction_testing.ipynb
+++ b/notebooks/deprecated/extraction_testing.ipynb
@@ -17,7 +17,7 @@
"from d123.common.multithreading.worker_pool import WorkerPool\n",
"\n",
"from d123.dataset.arrow.helper import open_arrow_arrow_table\n",
- "from d123.dataset.dataset_specific.nuplan.nuplan_data_processor import worker_map\n",
+ "from d123.datasets.nuplan.nuplan_data_processor import worker_map\n",
"from d123.dataset.logs.log_metadata import LogMetadata\n",
"from d123.dataset.scene.abstract_scene import AbstractScene\n",
"from d123.dataset.scene.arrow_scene import ArrowScene, SceneExtractionInfo\n",
diff --git a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb
index 32805c97..8c9339ad 100644
--- a/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb
+++ b/notebooks/deprecated/map_conversion/test_nuplan_conversion.ipynb
@@ -7,7 +7,7 @@
"outputs": [],
"source": [
"from pathlib import Path\n",
- "from d123.dataset.dataset_specific.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n",
+ "from d123.datasets.nuplan.nuplan_map_conversion import NuPlanMapConverter, MAP_LOCATIONS\n",
"\n",
"\n",
"\n",
diff --git a/notebooks/deprecated/test_waypoints.ipynb b/notebooks/deprecated/test_waypoints.ipynb
index 86d2ea9c..cdafea69 100644
--- a/notebooks/deprecated/test_waypoints.ipynb
+++ b/notebooks/deprecated/test_waypoints.ipynb
@@ -60,7 +60,7 @@
"from shapely.geometry import LineString\n",
"import numpy as np\n",
"from matplotlib import pyplot as plt\n",
- "from d123.dataset.dataset_specific.carla.carla_data_processor import _load_json_gz \n",
+ "from d123.datasets.carla.carla_data_processor import _load_json_gz \n",
"from d123.common.visualization.matplotlib.plots import _plot_scene_on_ax\n",
"json_dict = _load_json_gz(\"/home/daniel/carla_workspace/data/_Rep0_longest1_route0_06_13_17_21_21/boxes/0000000002.json.gz\")\n",
"json_dict\n",
diff --git a/notebooks/gym/test_gym.ipynb b/notebooks/gym/test_gym.ipynb
index c49f505c..a9e89681 100644
--- a/notebooks/gym/test_gym.ipynb
+++ b/notebooks/gym/test_gym.ipynb
@@ -89,7 +89,7 @@
")\n",
"from d123.dataset.arrow.conversion import TrafficLightDetectionWrapper\n",
"from d123.dataset.maps.abstract_map import AbstractMap\n",
- "from d123.common.datatypes.detection.detection import BoxDetectionWrapper\n",
+ "d123.datatypes.detections.detection import BoxDetectionWrapper\n",
"from d123.dataset.scene.abstract_scene import AbstractScene\n",
"import io\n",
"from PIL import Image\n",
diff --git a/notebooks/nuplan/nuplan_sensor_loading.ipynb b/notebooks/nuplan/nuplan_sensor_loading.ipynb
index 3fc654d8..939097bf 100644
--- a/notebooks/nuplan/nuplan_sensor_loading.ipynb
+++ b/notebooks/nuplan/nuplan_sensor_loading.ipynb
@@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.dataset.dataset_specific.nuplan.nuplan_data_converter import NuplanDataConverter"
+ "from d123.datasets.nuplan.nuplan_data_converter import NuplanDataConverter"
]
},
{
diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb
index 4436e7ea..25018fda 100644
--- a/notebooks/scene_rendering.ipynb
+++ b/notebooks/scene_rendering.ipynb
@@ -61,8 +61,8 @@
"outputs": [],
"source": [
"from typing import Tuple\n",
- "from d123.common.datatypes.detection.detection import BoxDetection\n",
- "from d123.common.datatypes.detection.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n",
+ "d123.datatypes.detections.detection import BoxDetection\n",
+ "d123.datatypes.detections.detection_types import DYNAMIC_DETECTION_TYPES, STATIC_DETECTION_TYPES\n",
"from d123.geometry import StateSE2\n",
"from d123.geometry.transform.tranform_2d import translate_along_yaw\n",
"from d123.geometry.vector import Vector2D\n",
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 3627a511..3f2e9f12 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -7,11 +7,11 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n",
- "from d123.dataset.scene.scene_filter import SceneFilter\n",
+ "from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n",
+ "from d123.datatypes.scene.scene_filter import SceneFilter\n",
"\n",
"from d123.common.multithreading.worker_sequential import Sequential\n",
- "from d123.common.datatypes.sensor.camera import CameraType"
+ "from d123.datatypes.sensors.camera import CameraType"
]
},
{
@@ -42,8 +42,8 @@
"\n",
"# splits = [\"wopd_train\"]\n",
"# splits = [\"carla\"]\n",
- "# splits = [\"nuplan_private_test\"]\n",
- "splits = [\"av2-sensor-mini_train\"]\n",
+ "splits = [\"nuplan_private_test\"]\n",
+ "# splits = [\"av2-sensor-mini_train\"]\n",
"# log_names = None\n",
"\n",
"\n",
@@ -76,8 +76,10 @@
"outputs": [],
"source": [
"from typing import List, Optional, Tuple\n",
+ "\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
+ "\n",
"from d123.geometry import Point2D\n",
"from d123.common.visualization.color.color import BLACK, DARK_GREY, DARKER_GREY, LIGHT_GREY, NEW_TAB_10, TAB_10\n",
"from d123.common.visualization.color.config import PlotConfig\n",
@@ -89,11 +91,11 @@
" add_traffic_lights_to_ax,\n",
")\n",
"from d123.common.visualization.matplotlib.utils import add_shapely_linestring_to_ax, add_shapely_polygon_to_ax\n",
- "from d123.dataset.maps.abstract_map import AbstractMap\n",
- "from d123.dataset.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n",
- "from d123.dataset.maps.gpkg.gpkg_map_objects import GPKGIntersection\n",
- "from d123.dataset.maps.map_datatypes import MapLayer\n",
- "from d123.dataset.scene.abstract_scene import AbstractScene\n",
+ "from d123.datatypes.maps.abstract_map import AbstractMap\n",
+ "from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractLaneGroup\n",
+ "from d123.datatypes.maps.gpkg.gpkg_map_objects import GPKGIntersection\n",
+ "from d123.datatypes.maps.map_datatypes import MapLayer\n",
+ "from d123.datatypes.scene.abstract_scene import AbstractScene\n",
"\n",
"\n",
"import shapely.geometry as geom\n",
diff --git a/notebooks/waymo_perception/map_testing.ipynb b/notebooks/waymo_perception/map_testing.ipynb
index d7694c2d..ed242b08 100644
--- a/notebooks/waymo_perception/map_testing.ipynb
+++ b/notebooks/waymo_perception/map_testing.ipynb
@@ -241,7 +241,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.dataset.dataset_specific.wopd.wopd_map_utils import extract_lane_boundaries\n",
+ "from d123.datasets.wopd.wopd_map_utils import extract_lane_boundaries\n",
"\n",
"\n",
"left_boundaries, right_boundaries = extract_lane_boundaries(\n",
From 53c46e8123d129ba016841ce4fbe6493720e8d71 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 24 Sep 2025 13:59:17 +0200
Subject: [PATCH 048/145] Fix some wrong import paths in hydra logs (#39)
---
.../config/common/scene_builder/default_scene_builder.yaml | 2 +-
d123/script/config/common/scene_filter/all_scenes.yaml | 2 +-
d123/script/config/common/scene_filter/log_scenes.yaml | 2 +-
d123/script/config/common/scene_filter/nuplan_mini_train.yaml | 2 +-
d123/script/config/common/scene_filter/nuplan_mini_val.yaml | 2 +-
d123/script/config/common/scene_filter/nuplan_sim_agent.yaml | 2 +-
6 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/d123/script/config/common/scene_builder/default_scene_builder.yaml b/d123/script/config/common/scene_builder/default_scene_builder.yaml
index 2a8bfec7..5e42aaf4 100644
--- a/d123/script/config/common/scene_builder/default_scene_builder.yaml
+++ b/d123/script/config/common/scene_builder/default_scene_builder.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_builder.ArrowSceneBuilder
+_target_: d123.datatypes.scene.arrow.arrow_scene_builder.ArrowSceneBuilder
_convert_: 'all'
dataset_path: ${d123_data_root}
diff --git a/d123/script/config/common/scene_filter/all_scenes.yaml b/d123/script/config/common/scene_filter/all_scenes.yaml
index 06ac76d5..35e157a0 100644
--- a/d123/script/config/common/scene_filter/all_scenes.yaml
+++ b/d123/script/config/common/scene_filter/all_scenes.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_filter.SceneFilter
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
_convert_: 'all'
split_types: null
diff --git a/d123/script/config/common/scene_filter/log_scenes.yaml b/d123/script/config/common/scene_filter/log_scenes.yaml
index 68114df9..f83322ac 100644
--- a/d123/script/config/common/scene_filter/log_scenes.yaml
+++ b/d123/script/config/common/scene_filter/log_scenes.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_filter.SceneFilter
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
_convert_: 'all'
split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
index f2f1c4bf..231e882b 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_mini_train.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_filter.SceneFilter
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
_convert_: 'all'
split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
index 2e4eefee..43f5c9bc 100644
--- a/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_mini_val.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_filter.SceneFilter
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
_convert_: 'all'
split_types: null
diff --git a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
index eea010cb..055ac331 100644
--- a/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
+++ b/d123/script/config/common/scene_filter/nuplan_sim_agent.yaml
@@ -1,4 +1,4 @@
-_target_: d123.dataset.scene.scene_filter.SceneFilter
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
_convert_: 'all'
split_types: null
From 5639321563ed30a5311c482993df9d5c33a91775 Mon Sep 17 00:00:00 2001
From: DanielDauner
Date: Wed, 24 Sep 2025 14:44:17 +0200
Subject: [PATCH 049/145] Fix some viser-specific issues after debugging.
---
d123/common/utils/mixin.py | 2 +-
d123/common/visualization/viser/server.py | 4 +-
d123/common/visualization/viser/utils.py | 75 ++++++++++++++++---
d123/common/visualization/viser/utils_v2.py | 73 ------------------
d123/datatypes/detections/detection.py | 7 +-
.../common/scene_filter/viser_scenes.yaml | 20 +++++
d123/script/config/viser/default_viser.yaml | 1 +
7 files changed, 93 insertions(+), 89 deletions(-)
delete mode 100644 d123/common/visualization/viser/utils_v2.py
create mode 100644 d123/script/config/common/scene_filter/viser_scenes.yaml
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
index 252a4eee..56038bb4 100644
--- a/d123/common/utils/mixin.py
+++ b/d123/common/utils/mixin.py
@@ -5,7 +5,7 @@
class ArrayMixin:
- """Abstract base class for geometric entities."""
+ """Mixin class for object entities."""
@classmethod
def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayMixin:
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index 6b44ac15..e5c95081 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -7,12 +7,12 @@
from d123.common.visualization.viser.utils import (
get_bounding_box_meshes,
+ get_bounding_box_outlines,
get_camera_if_available,
get_camera_values,
get_lidar_points,
get_map_meshes,
)
-from d123.common.visualization.viser.utils_v2 import get_bounding_box_outlines
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.sensors.camera import CameraType
from d123.datatypes.sensors.lidar import LiDARType
@@ -35,7 +35,7 @@
LINE_WIDTH: float = 4.0
# Bounding box config:
-BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "lines"
+BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh"
# Map config:
MAP_AVAILABLE: bool = True
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 783efa01..75d72b91 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -13,7 +13,8 @@
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.sensors.camera import Camera, CameraType
from d123.datatypes.sensors.lidar import LiDARType
-from d123.geometry import BoundingBoxSE3, Point3D, Polyline3D, StateSE3
+from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index
+from d123.geometry.geometry_index import BoundingBoxSE3Index
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
# TODO: Refactor this file.
@@ -57,13 +58,6 @@ def bounding_box_to_trimesh(bbox: BoundingBoxSE3, plot_config: PlotConfig) -> tr
return configure_trimesh(box_mesh, plot_config.fill_color)
-def translate_bounding_box_se3(bounding_box_se3: BoundingBoxSE3, point_3d: Point3D) -> BoundingBoxSE3:
- bounding_box_se3.center.x = bounding_box_se3.center.x - point_3d.x
- bounding_box_se3.center.y = bounding_box_se3.center.y - point_3d.y
- bounding_box_se3.center.z = bounding_box_se3.center.z - point_3d.z
- return bounding_box_se3
-
-
def get_bounding_box_meshes(scene: AbstractScene, iteration: int):
initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
@@ -75,19 +69,80 @@ def get_bounding_box_meshes(scene: AbstractScene, iteration: int):
output = {}
for box_detection in box_detections:
bbox: BoundingBoxSE3 = box_detection.bounding_box
- bbox = translate_bounding_box_se3(bbox, initial_ego_vehicle_state.center_se3)
+ bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.detection_type]
trimesh_box = bounding_box_to_trimesh(bbox, plot_config)
output[f"{box_detection.metadata.detection_type.serialize()}/{box_detection.metadata.track_token}"] = (
trimesh_box
)
- ego_bbox = translate_bounding_box_se3(ego_vehicle_state.bounding_box, initial_ego_vehicle_state.center_se3)
+ ego_bbox = ego_vehicle_state.bounding_box
+ ego_bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
trimesh_box = bounding_box_to_trimesh(ego_bbox, EGO_VEHICLE_CONFIG)
output["ego"] = trimesh_box
return output
+def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]:
+ """
+ TODO: Vectorize this function and move to geometry module.
+ """
+ corners = bounding_box.corners_array
+ index_pairs = [
+ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
+ (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
+ (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM),
+ (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM),
+ (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP),
+ (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP),
+ (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP),
+ (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP),
+ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP),
+ (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP),
+ (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP),
+ (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP),
+ ]
+ lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
+ for i, (start_idx, end_idx) in enumerate(index_pairs):
+ lines[i, 0] = corners[start_idx]
+ lines[i, 1] = corners[end_idx]
+ return lines
+
+
+def get_bounding_box_outlines(scene: AbstractScene, iteration: int):
+
+ initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
+ origin: StateSE3 = initial_ego_vehicle_state.center_se3
+
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
+
+ lines = []
+ colors = []
+ for box_detection in box_detections:
+ bbox: BoundingBoxSE3 = box_detection.bounding_box_se3
+ bbox_lines = _get_bounding_box_lines(bbox)
+ bbox_lines[..., Point3DIndex.XYZ] = bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ]
+ bbox_color = np.zeros(bbox_lines.shape, dtype=np.float32)
+ bbox_color[..., :] = (
+ BOX_DETECTION_CONFIG[box_detection.metadata.detection_type]
+ .fill_color.set_brightness(BRIGHTNESS_FACTOR)
+ .rgb_norm
+ )
+
+ lines.append(bbox_lines)
+ colors.append(bbox_color)
+
+ ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3)
+ ego_bbox_lines[..., Point3DIndex.XYZ] = ego_bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ]
+ ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32)
+ ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm
+
+ lines.append(ego_bbox_lines)
+ colors.append(ego_bbox_color)
+ return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0)
+
+
def get_map_meshes(scene: AbstractScene):
initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
center = initial_ego_vehicle_state.center_se3
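[Editor's note] The `_get_bounding_box_lines` helper moved above still carries a TODO to vectorize it. Assuming `corners_array` is an (8, 3) array, the Python loop reduces to a single fancy-indexing gather over a constant (12, 2) edge table; the integer corner ordering below is hypothetical and would be replaced by the `Corners3DIndex` members:

```python
import numpy as np

# 12 box edges: bottom face, top face, verticals (hypothetical corner order).
BOX_EDGE_INDICES = np.array(
    [(0, 1), (1, 2), (2, 3), (3, 0),
     (4, 5), (5, 6), (6, 7), (7, 4),
     (0, 4), (1, 5), (2, 6), (3, 7)],
    dtype=np.intp,
)

def get_bounding_box_lines(corners: np.ndarray) -> np.ndarray:
    """(8, 3) corners -> (12, 2, 3) line segments via one gather, no loop."""
    return corners[BOX_EDGE_INDICES]

unit_cube = np.array(
    [[x, y, z] for z in (0.0, 1.0) for x, y in [(0, 0), (1, 0), (1, 1), (0, 1)]]
)
print(get_bounding_box_lines(unit_cube).shape)  # (12, 2, 3)
```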
diff --git a/d123/common/visualization/viser/utils_v2.py b/d123/common/visualization/viser/utils_v2.py
deleted file mode 100644
index 16a1a15b..00000000
--- a/d123/common/visualization/viser/utils_v2.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import numpy as np
-import numpy.typing as npt
-
-from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG
-from d123.common.visualization.viser.utils import BRIGHTNESS_FACTOR
-from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex
-
-# TODO: Refactor this file.
-# TODO: Add general utilities for 3D primitives and mesh support.
-
-
-def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]:
- """
- TODO: Vectorize this function and move to geometry module.
- """
- corners = bounding_box.corners_array
- index_pairs = [
- (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
- (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
- (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM),
- (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM),
- (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP),
- (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP),
- (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP),
- (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP),
- (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP),
- (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP),
- (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP),
- (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP),
- ]
- lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
- for i, (start_idx, end_idx) in enumerate(index_pairs):
- lines[i, 0] = corners[start_idx]
- lines[i, 1] = corners[end_idx]
- return lines
-
-
-def translate_points_3d(points_3d: npt.NDArray[np.float64], point_3d: Point3D) -> npt.NDArray[np.float64]:
- # TODO: remove
- return points_3d - point_3d.array
-
-
-def get_bounding_box_outlines(scene: AbstractScene, iteration: int):
-
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
- box_detections = scene.get_box_detections_at_iteration(iteration)
-
- lines = []
- colors = []
- for box_detection in box_detections:
- bbox: BoundingBoxSE3 = box_detection.bounding_box_se3
- bbox_lines = _get_bounding_box_lines(bbox)
- bbox_lines = translate_points_3d(bbox_lines, initial_ego_vehicle_state.center_se3.point_3d)
- bbox_color = np.zeros(bbox_lines.shape, dtype=np.float32)
- bbox_color[..., :] = (
- BOX_DETECTION_CONFIG[box_detection.metadata.detection_type]
- .fill_color.set_brightness(BRIGHTNESS_FACTOR)
- .rgb_norm
- )
-
- lines.append(bbox_lines)
- colors.append(bbox_color)
-
- ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3)
- ego_bbox_lines = translate_points_3d(ego_bbox_lines, initial_ego_vehicle_state.center_se3.point_3d)
- ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32)
- ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm
-
- lines.append(ego_bbox_lines)
- colors.append(ego_bbox_color)
- return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0)
diff --git a/d123/datatypes/detections/detection.py b/d123/datatypes/detections/detection.py
index 29da95d7..aa1cc679 100644
--- a/d123/datatypes/detections/detection.py
+++ b/d123/datatypes/detections/detection.py
@@ -24,7 +24,7 @@ class BoxDetectionSE2:
metadata: BoxDetectionMetadata
bounding_box_se2: BoundingBoxSE2
- velocity: Vector2D | None = None
+ velocity: Optional[Vector2D] = None
@property
def shapely_polygon(self) -> shapely.geometry.Polygon:
@@ -81,7 +81,7 @@ def box_detection_se2(self) -> BoxDetectionSE2:
@dataclass
class BoxDetectionWrapper:
- box_detections: list[BoxDetection]
+ box_detections: List[BoxDetection]
def __getitem__(self, index: int) -> BoxDetection:
return self.box_detections[index]
@@ -132,7 +132,8 @@ class TrafficLightDetection:
@dataclass
class TrafficLightDetectionWrapper:
- traffic_light_detections: list[TrafficLightDetection]
+
+ traffic_light_detections: List[TrafficLightDetection]
def __getitem__(self, index: int) -> TrafficLightDetection:
return self.traffic_light_detections[index]
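[Editor's note] The detection.py hunks above replace `Vector2D | None` and `list[...]` with `typing.Optional`/`typing.List`. Dataclass field annotations are evaluated when the class body runs, so PEP 604 unions raise TypeError on Python < 3.10 (and builtin generics on < 3.9) unless `from __future__ import annotations` is in effect. A minimal sketch with hypothetical stand-in classes:

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Detection:  # hypothetical stand-in for BoxDetection
    track_token: str
    velocity: Optional[float] = None  # `float | None` would fail on 3.9

@dataclass
class DetectionWrapper:  # hypothetical stand-in for BoxDetectionWrapper
    detections: List[Detection] = field(default_factory=list)  # not list[...]

print(DetectionWrapper([Detection("abc123", velocity=1.5)]))
```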
diff --git a/d123/script/config/common/scene_filter/viser_scenes.yaml b/d123/script/config/common/scene_filter/viser_scenes.yaml
new file mode 100644
index 00000000..fe012d84
--- /dev/null
+++ b/d123/script/config/common/scene_filter/viser_scenes.yaml
@@ -0,0 +1,20 @@
+_target_: d123.datatypes.scene.scene_filter.SceneFilter
+_convert_: 'all'
+
+split_types: null
+split_names: null
+log_names: null
+
+
+map_names: null
+scene_tokens: null
+timestamp_threshold_s: 10.0
+ego_displacement_minimum_m: null
+
+duration_s: 10.0
+history_s: 0.0
+
+camera_types: null
+
+max_num_scenes: null
+shuffle: True
diff --git a/d123/script/config/viser/default_viser.yaml b/d123/script/config/viser/default_viser.yaml
index c42164dc..fb0e47a2 100644
--- a/d123/script/config/viser/default_viser.yaml
+++ b/d123/script/config/viser/default_viser.yaml
@@ -12,5 +12,6 @@ hydra:
defaults:
- default_common
- default_dataset_paths
+ - override scene_filter: viser_scenes
port_number: 8080
From ad9d33fbcbfc0eb4fccf42d6f8b15c2134bd4913 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 30 Sep 2025 13:14:06 +0800
Subject: [PATCH 050/145] Only extract timestamps that exist in ego_pose.txt
---
.../kitti_360/kitti_360_data_converter.py | 71 ++++++------
.../kitti_360/kitti_360_helper.py | 81 ++-----------
.../kitti_360/kitti_360_map_conversion.py | 2 +-
.../kitti_360/preprocess_detection.py | 108 +++++-------------
4 files changed, 68 insertions(+), 194 deletions(-)
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
index 4e221617..76396bbd 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_data_converter.py
@@ -32,7 +32,7 @@
from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
from d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
@@ -57,8 +57,8 @@
DIR_POSES = "data_poses"
DIR_CALIB = "calibration"
-PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
-# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
+# PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_RAW
+PATH_2D_RAW_ROOT: Path = KITTI360_DATA_ROOT
PATH_2D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_2D_SMT
PATH_3D_RAW_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_RAW
PATH_3D_SMT_ROOT: Path = KITTI360_DATA_ROOT / DIR_3D_SMT
@@ -330,7 +330,7 @@ def _read_projection_matrix(p_line: str) -> np.ndarray:
K = P[:, :3]
return K
-def _readYAMLFile(fileName):
+def _readYAMLFile(fileName: Path) -> Dict[str, Any]:
'''make OpenCV YAML file compatible with python'''
ret = {}
skip_lines=1 # Skip the first line which says "%YAML:1.0". Or replace it with "%YAML 1.0"
@@ -360,22 +360,22 @@ def _write_recording_table(
data_converter_config: DataConverterConfig
) -> None:
- ts_list = _read_timestamps(log_name)
- ego_state_all = _extract_ego_state_all(log_name)
+ ts_list: List[TimePoint] = _read_timestamps(log_name)
+ ego_state_all, valid_timestamp = _extract_ego_state_all(log_name)
ego_states_xyz = np.array([ego_state[:3] for ego_state in ego_state_all],dtype=np.float64)
- detections_states,detections_velocity,detections_tokens,detections_types = _extract_detections(log_name,len(ts_list),ego_states_xyz)
+ detections_states, detections_velocity, detections_tokens, detections_types = _extract_detections(log_name, len(ts_list), ego_states_xyz, valid_timestamp)
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
- for idx, tp in enumerate(ts_list):
-
+ for idx in range(len(valid_timestamp)):
+ valid_idx = valid_timestamp[idx]
row_data = {
"token": [create_token(f"{log_name}_{idx}")],
- "timestamp": [tp.time_us],
- "detections_state": [detections_states[idx]],
- "detections_velocity": [detections_velocity[idx]],
- "detections_token": [detections_tokens[idx]],
- "detections_type": [detections_types[idx]],
+ "timestamp": [ts_list[valid_idx].time_us],
+ "detections_state": [detections_states[valid_idx]],
+ "detections_velocity": [detections_velocity[valid_idx]],
+ "detections_token": [detections_tokens[valid_idx]],
+ "detections_type": [detections_types[valid_idx]],
"ego_states": [ego_state_all[idx]],
"traffic_light_ids": [[]],
"traffic_light_types": [[]],
@@ -384,7 +384,7 @@ def _write_recording_table(
}
if data_converter_config.lidar_store_option is not None:
- lidar_data_dict = _extract_lidar(log_name, idx, data_converter_config)
+ lidar_data_dict = _extract_lidar(log_name, valid_idx, data_converter_config)
for lidar_type, lidar_data in lidar_data_dict.items():
if lidar_data is not None:
row_data[lidar_type.serialize()] = [lidar_data]
@@ -392,7 +392,7 @@ def _write_recording_table(
row_data[lidar_type.serialize()] = [None]
if data_converter_config.camera_store_option is not None:
- camera_data_dict = _extract_cameras(log_name, idx, data_converter_config)
+ camera_data_dict = _extract_cameras(log_name, valid_idx, data_converter_config)
for camera_type, camera_data in camera_data_dict.items():
if camera_data is not None:
row_data[camera_type.serialize()] = [camera_data[0]]
@@ -448,7 +448,7 @@ def _read_timestamps(log_name: str) -> Optional[List[TimePoint]]:
return tps
return None
-def _extract_ego_state_all(log_name: str) -> List[List[float]]:
+def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]]:
ego_state_all: List[List[float]] = []
@@ -456,24 +456,20 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
if not pose_file.exists():
raise FileNotFoundError(f"Pose file not found: {pose_file}")
poses = np.loadtxt(pose_file)
- poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
+ poses_time = poses[:, 0].astype(np.int32)
+ valid_timestamp: List[int] = list(poses_time)
- # oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
- oxts_path = Path("/data/jbwang/d123/data_poses/") / log_name / "oxts" / "data"
+ oxts_path = PATH_POSES_ROOT / log_name / "oxts" / "data"
- pose_idx = 0
- poses_time_len = len(poses_time)
-
- for idx in range(len(list(oxts_path.glob("*.txt")))):
- oxts_path_file = oxts_path / f"{int(idx):010d}.txt"
+ for idx in range(len(valid_timestamp)):
+ oxts_path_file = oxts_path / f"{int(valid_timestamp[idx]):010d}.txt"
oxts_data = np.loadtxt(oxts_path_file)
vehicle_parameters = get_kitti360_station_wagon_parameters()
- while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < idx:
- pose_idx += 1
- pos = pose_idx
- # pos = np.searchsorted(poses_time, idx, side='right') - 1
+ pos = idx
+ if log_name=="2013_05_28_drive_0004_sync" and pos == 0:
+ pos = 1
# NOTE you can use oxts_data[3:6] as roll, pitch, yaw for simplicity
#roll, pitch, yaw = oxts_data[3:6]
@@ -521,12 +517,13 @@ def _extract_ego_state_all(log_name: str) -> List[List[float]]:
timepoint=None,
).array.tolist()
)
- return ego_state_all
+ return ego_state_all, valid_timestamp
def _extract_detections(
log_name: str,
ts_len: int,
- ego_states_xyz: np.ndarray
+ ego_states_xyz: np.ndarray,
+ valid_timestamp: List[int],
) -> Tuple[List[List[float]], List[List[float]], List[str], List[int]]:
detections_states: List[List[List[float]]] = [[] for _ in range(ts_len)]
@@ -544,17 +541,16 @@ def _extract_detections(
tree = ET.parse(bbox_3d_path)
root = tree.getroot()
- dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
-
detection_preprocess_path = PREPOCESS_DETECTION_DIR / f"{log_name}_detection_preprocessed.pkl"
if detection_preprocess_path.exists():
with open(detection_preprocess_path, "rb") as f:
detection_preprocess_result = pickle.load(f)
static_records_dict = {record_item["global_id"]: record_item for record_item in detection_preprocess_result["static"]}
- dynamic_records_dict = detection_preprocess_result["dynamic"]
else:
detection_preprocess_result = None
+ dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
+
for child in root:
if child.find('semanticId') is not None:
semanticIdKITTI = int(child.find('semanticId').text)
@@ -570,7 +566,7 @@ def _extract_detections(
#static object
if obj.timestamp == -1:
if detection_preprocess_result is None:
- obj.filter_by_radius(ego_states_xyz,radius=50.0)
+ obj.filter_by_radius(ego_states_xyz, valid_timestamp, radius=50.0)
else:
obj.load_detection_preprocess(static_records_dict)
for record in obj.valid_frames["records"]:
@@ -584,11 +580,8 @@ def _extract_detections(
dynamic_objs[global_ID].append(obj)
# dynamic object
- if detection_preprocess_result is not None:
- dynamic_objs = copy.deepcopy(dynamic_records_dict)
-
for global_id, obj_list in dynamic_objs.items():
- obj_list = interpolate_obj_list(obj_list)
+ obj_list.sort(key=lambda obj: obj.timestamp)
num_frames = len(obj_list)
positions = [obj.get_state_array()[:3] for obj in obj_list]
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
index a756a343..e8520253 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_helper.py
@@ -1,7 +1,7 @@
import numpy as np
from collections import defaultdict
-from typing import Dict, Optional, Any, List
+from typing import Dict, Optional, Any, List, Tuple
import copy
from scipy.linalg import polar
from scipy.spatial.transform import Rotation as R
@@ -30,14 +30,14 @@
KITTI3602NUPLAN_IMU_CALIBRATION = kitti3602nuplan_imu_calibration_ideal
MAX_N = 1000
-def local2global(semanticId, instanceId):
+def local2global(semanticId: int, instanceId: int) -> int:
globalId = semanticId*MAX_N + instanceId
if isinstance(globalId, np.ndarray):
return globalId.astype(np.int32)
else:
return int(globalId)
-def global2local(globalId):
+def global2local(globalId: int) -> Tuple[int, int]:
semanticId = globalId // MAX_N
instanceId = globalId % MAX_N
if isinstance(globalId, np.ndarray):
@@ -72,12 +72,6 @@ def __init__(self):
#label
self.label = ''
-
- # used to mark if the bbox is interpolated
- self.is_interpolated = False
- # GT annotation idx
- self.idx_next = -1
- self.idx_prev = -1
def parseBbox(self, child):
self.timestamp = int(child.find('timestamp').text)
@@ -138,7 +132,7 @@ def parse_scale_rotation(self):
self.pitch = pitch
self.roll = roll
- def get_state_array(self):
+ def get_state_array(self) -> np.ndarray:
center = StateSE3(
x=self.T[0],
y=self.T[1],
@@ -152,17 +146,17 @@ def get_state_array(self):
return bounding_box_se3.array
- def filter_by_radius(self,ego_state_xyz,radius=50.0):
+ def filter_by_radius(self, ego_state_xyz: np.ndarray, valid_timestamp: List[int], radius: float = 50.0) -> None:
''' first stage of detection, used to filter out detections by radius '''
d = np.linalg.norm(ego_state_xyz - self.T[None, :], axis=1)
idxs = np.where(d <= radius)[0]
for idx in idxs:
self.valid_frames["records"].append({
- "timestamp": idx,
+ "timestamp": valid_timestamp[idx],
"points_in_box": None,
})
- def box_visible_in_point_cloud(self, points):
+ def box_visible_in_point_cloud(self, points: np.ndarray) -> Tuple[bool, int]:
''' points: (N,3) , box: (8,3) '''
box = self.vertices.copy()
# avoid calculating ground point cloud
@@ -185,67 +179,6 @@ def load_detection_preprocess(self, records_dict: Dict[int, Any]):
if self.globalID in records_dict:
self.valid_frames["records"] = records_dict[self.globalID]["records"]
-def interpolate_obj_list(obj_list: List[KITTI360Bbox3D]) -> List[KITTI360Bbox3D]:
- """
- Fill missing timestamps in obj_list by linear interpolation.
- For each missing timestamp between two objects, create a new KITTI360Bbox3D object
- with only interpolated position (T), yaw, pitch, roll, and copy other attributes.
- Returns a new list with all timestamps filled and sorted.
- """
- if not obj_list:
- return obj_list
-
- # Sort by timestamp ascending
- obj_list.sort(key=lambda obj: obj.timestamp)
- timestamps = [obj.timestamp for obj in obj_list]
- min_ts, max_ts = min(timestamps), max(timestamps)
- full_ts = list(range(min_ts, max_ts + 1))
- missing_ts = sorted(set(full_ts) - set(timestamps))
-
- # Prepare arrays for interpolation
- T_arr = np.array([obj.T for obj in obj_list])
- yaw_arr = np.array([obj.yaw for obj in obj_list])
- pitch_arr = np.array([obj.pitch for obj in obj_list])
- roll_arr = np.array([obj.roll for obj in obj_list])
- ts_arr = np.array(timestamps)
-
- for ts in missing_ts:
- idx_next = np.searchsorted(ts_arr, ts)
- idx_prev = idx_next - 1
- if idx_prev < 0 or idx_next >= len(obj_list):
- continue
-
- frac = (ts - ts_arr[idx_prev]) / (ts_arr[idx_next] - ts_arr[idx_prev])
- T_interp = T_arr[idx_prev] * (1 - frac) + T_arr[idx_next] * frac
-
- yaw_delat = normalize_angle(yaw_arr[idx_next] - yaw_arr[idx_prev])
- yaw_interp = yaw_arr[idx_prev] + yaw_delat * frac
- yaw_interp = normalize_angle(yaw_interp)
-
- pitch_interp = pitch_arr[idx_prev] * (1 - frac) + pitch_arr[idx_next] * frac
- roll_interp = roll_arr[idx_prev] * (1 - frac) + roll_arr[idx_next] * frac
-
- obj_new = copy.deepcopy(obj_list[idx_prev])
- obj_new.timestamp = ts
- obj_new.T = T_interp
- obj_new.yaw = yaw_interp
- obj_new.pitch = pitch_interp
- obj_new.roll = roll_interp
- obj_new.Rm = R.from_euler('zyx', [obj_new.yaw, obj_new.pitch, obj_new.roll], degrees=False).as_matrix()
- obj_new.R = obj_new.Rm @ obj_new.Sm
- obj_new.vertices = (obj_new.R @ obj_new.vertices_template.T).T + obj_new.T
- obj_new.is_interpolated = True
- obj_new.idx_prev = ts_arr[idx_prev]
- obj_new.idx_next = ts_arr[idx_next]
-
- obj_list.append(obj_new)
-
- obj_list.sort(key=lambda obj: obj.timestamp)
- return obj_list
-
-def normalize_angle(a):
- return np.arctan2(np.sin(a), np.cos(a))
-
class KITTI360_MAP_Bbox3D():
def __init__(self):
self.id = -1
diff --git a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
index bf13eda6..924a7822 100644
--- a/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
+++ b/d123/dataset/dataset_specific/kitti_360/kitti_360_map_conversion.py
@@ -36,7 +36,7 @@
# "driveway",
]
-def convert_kitti360_map(log_name, map_path):
+def convert_kitti360_map(log_name: str, map_path: Path) -> None:
xml_path = PATH_3D_BBOX_ROOT / "train_full" / f"{log_name}.xml"
diff --git a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
index f2d14ce1..97ea6eb8 100644
--- a/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
+++ b/d123/dataset/dataset_specific/kitti_360/preprocess_detection.py
@@ -1,8 +1,8 @@
"""
-This script precomputes detection records for KITTI-360:
+This script precomputes static detection records for KITTI-360:
- Stage 1: radius filtering using ego positions (from poses.txt).
- Stage 2: LiDAR visibility check to fill per-frame point counts.
-It writes a pickle containing, for each object, all feasible frames and
+It writes a pickle containing, for each static object, all feasible frames and
their point counts to avoid recomputation in later pipelines.
We have precomputed and saved the pickle for all training logs; you can either
download them or run this script to generate them.
@@ -31,8 +31,8 @@
PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic,interpolate_obj_list
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
+from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT
def _bbox_xml_path(log_name: str) -> Path:
if log_name == "2013_05_28_drive_0004_sync":
@@ -47,8 +47,8 @@ def _load_lidar_xyz(filepath: Path) -> np.ndarray:
arr = np.fromfile(filepath, dtype=np.float32)
return arr.reshape(-1, 4)[:, :3]
-def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, List[KITTI360Bbox3D]]]:
- """Parse XML and collect objects with valid class names."""
+def _collect_static_objects(log_name: str) -> List[KITTI360Bbox3D]:
+ """Parse XML and collect static objects with valid class names."""
xml_path = _bbox_xml_path(log_name)
if not xml_path.exists():
raise FileNotFoundError(f"BBox 3D file not found: {xml_path}")
@@ -56,7 +56,6 @@ def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, Lis
root = tree.getroot()
static_objs: List[KITTI360Bbox3D] = []
- dynamic_objs: Dict[int, List[KITTI360Bbox3D]] = defaultdict(list)
for child in root:
if child.find('semanticId') is not None:
@@ -65,20 +64,15 @@ def _collect_objects(log_name: str) -> Tuple[List[KITTI360Bbox3D], Dict[int, Lis
else:
lable = child.find('label').text
name = BBOX_LABLES_TO_DETECTION_NAME_DICT.get(lable, 'unknown')
- if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT:
+ timestamp = int(child.find('timestamp').text) # -1 for static objects
+ if child.find("transform") is None or name not in KIITI360_DETECTION_NAME_DICT or timestamp != -1:
continue
obj = KITTI360Bbox3D()
obj.parseBbox(child)
- timestamp = int(child.find('timestamp').text)
- if timestamp == -1:
- static_objs.append(obj)
- else:
- global_ID = obj.globalID
- dynamic_objs[global_ID].append(obj)
-
- return static_objs, dynamic_objs
+ static_objs.append(obj)
+ return static_objs
-def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
+def _collect_ego_states(log_name: str) -> Tuple[npt.NDArray[np.float64], List[int]]:
"""Load ego states from poses.txt."""
pose_file = PATH_POSES_ROOT / log_name / "poses.txt"
@@ -86,17 +80,12 @@ def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
raise FileNotFoundError(f"Pose file not found: {pose_file}")
poses = np.loadtxt(pose_file)
- poses_time = poses[:, 0] - 1 # Adjusting time to start from 0
+ poses_time = poses[:, 0].astype(np.int32)
+ valid_timestamp: List[int] = list(poses_time)
- pose_idx = 0
- poses_time_len = len(poses_time)
-
ego_states = []
-
- for time_idx in range(length):
- while pose_idx + 1 < poses_time_len and poses_time[pose_idx + 1] < time_idx:
- pose_idx += 1
- pos = pose_idx
+ for time_idx in range(len(valid_timestamp)):
+ pos = time_idx
state_item = np.eye(4)
r00, r01, r02 = poses[pos, 1:4]
r10, r11, r12 = poses[pos, 5:8]
@@ -115,7 +104,8 @@ def _collect_ego_states(log_name: str,length: int) -> npt.NDArray[np.float64]:
state_item[:3, 3] = ego_state_xyz
ego_states.append(state_item)
- return np.array(ego_states) # [N,4,4]
+ # [N,4,4]
+ return np.array(ego_states), valid_timestamp
def process_detection(
@@ -128,9 +118,6 @@ def process_detection(
for static objects:
1) filter by ego-centered radius over all frames
2) filter by LiDAR point cloud visibility
- for dynamic objects:
- 1) interpolate boxes for missing frames
- 2) select box with highest LiDAR point count
Save per-frame detections to a pickle to avoid recomputation.
"""
@@ -141,31 +128,22 @@ def process_detection(
logging.info(f"[preprocess] {log_name}: found {ts_len} lidar frames")
# 1) Parse objects from XML
- static_objs: List[KITTI360Bbox3D]
- dynamic_objs: Dict[int, List[KITTI360Bbox3D]]
- static_objs, dynamic_objs = _collect_objects(log_name)
-
- # only interpolate dynamic objects
- for global_ID, obj_list in dynamic_objs.items():
- obj_list_interpolated = interpolate_obj_list(obj_list)
- dynamic_objs[global_ID] = obj_list_interpolated
- dymanic_objs_updated = copy.deepcopy(dynamic_objs)
-
+ static_objs: List[KITTI360Bbox3D] = _collect_static_objects(log_name)
logging.info(f"[preprocess] {log_name}: static objects = {len(static_objs)}")
- logging.info(f"[preprocess] {log_name}: dynamic objects = {len(dynamic_objs.keys())}")
# 2) Filter static objs by ego-centered radius
- ego_states = _collect_ego_states(log_name,ts_len)
+ ego_states, valid_timestamp = _collect_ego_states(log_name)
logging.info(f"[preprocess] {log_name}: ego states = {len(ego_states)}")
for obj in static_objs:
- obj.filter_by_radius(ego_states[:, :3, 3], radius_m)
+ obj.filter_by_radius(ego_states[:, :3, 3], valid_timestamp, radius_m)
# 3) Filter static objs by LiDAR point cloud visibility
lidar_extrinsic = get_lidar_extrinsic()
def process_one_frame(time_idx: int) -> None:
- logging.info(f"[preprocess] {log_name}: t={time_idx}")
- lidar_path = _lidar_frame_path(log_name, time_idx)
+ valid_time_idx = valid_timestamp[time_idx]
+ logging.info(f"[preprocess] {log_name}: t={valid_time_idx}")
+ lidar_path = _lidar_frame_path(log_name, valid_time_idx)
if not lidar_path.exists():
logging.warning(f"[preprocess] {log_name}: LiDAR frame not found: {lidar_path}")
return
@@ -181,49 +159,20 @@ def process_one_frame(time_idx: int) -> None:
lidar_in_world = lidar_in_imu @ ego_states[time_idx][:3,:3].T + ego_states[time_idx][:3,3]
for obj in static_objs:
- if not any(record["timestamp"] == time_idx for record in obj.valid_frames["records"]):
+ if not any(record["timestamp"] == valid_time_idx for record in obj.valid_frames["records"]):
continue
visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world)
if not visible:
- obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != time_idx]
+ obj.valid_frames["records"] = [record for record in obj.valid_frames["records"] if record["timestamp"] != valid_time_idx]
else:
for record in obj.valid_frames["records"]:
- if record["timestamp"] == time_idx:
+ if record["timestamp"] == valid_time_idx:
record["points_in_box"] = points_in_box
break
- # for dynamic objects, select the box with the highest LiDAR point count
- for global_ID, obj_list in dynamic_objs.items():
- obj_at_time = [obj for obj in obj_list if obj.timestamp == time_idx]
- if not obj_at_time:
- continue
-
- obj = obj_at_time[0]
- # NOTE only update interpolated boxes
- if not obj.is_interpolated:
- continue
-
- max_points = -1
- best_obj = None
- ts_prev = obj.idx_prev
- ts_next = obj.idx_next
- candidates = [candidate for candidate in obj_list if ts_prev <= candidate.timestamp <= ts_next]
-
- for obj in candidates:
- visible, points_in_box = obj.box_visible_in_point_cloud(lidar_in_world)
- if points_in_box > max_points:
- max_points = points_in_box
- best_obj = obj
-
- if best_obj is not None:
- idx = next((i for i, o in enumerate(dynamic_objs[global_ID]) if o.timestamp == time_idx), None)
- if idx is not None:
- dymanic_objs_updated[global_ID][idx] = copy.deepcopy(best_obj)
- dymanic_objs_updated[global_ID][idx].timestamp = time_idx
-
max_workers = os.cpu_count() * 2
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
- results = list(executor.map(process_one_frame, range(ts_len)))
+ results = list(executor.map(process_one_frame, range(len(valid_timestamp))))
# 4) Save pickle
static_records: List[Dict[str, Any]] = []
@@ -238,7 +187,6 @@ def process_one_frame(time_idx: int) -> None:
payload = {
"log_name": log_name,
"static": static_records,
- "dynamic": dymanic_objs_updated
}
with open(out_path, "wb") as f:
pickle.dump(payload, f)
@@ -248,7 +196,7 @@ def process_one_frame(time_idx: int) -> None:
import argparse
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Precompute KITTI-360 detections filters")
- parser.add_argument("--log_name", default="2013_05_28_drive_0004_sync")
+ parser.add_argument("--log_name", default="2013_05_28_drive_0000_sync")
parser.add_argument("--radius", type=float, default=60.0)
parser.add_argument("--out", type=Path, default="detection_preprocess", help="output directory for pkl")
args = parser.parse_args()
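
As a reference for the filtering step above, here is a minimal NumPy sketch of the ego-centered radius pre-filter; `ego_xyz`, `valid_timestamps`, and `box_center` are hypothetical names, since the exact `filter_by_radius` signature lives in the repo's helper code:

    import numpy as np

    def timestamps_within_radius(
        ego_xyz: np.ndarray,           # (N, 3) ego positions, one row per valid frame
        valid_timestamps: np.ndarray,  # (N,) frame indices aligned with ego_xyz
        box_center: np.ndarray,        # (3,) static object center in the world frame
        radius_m: float = 60.0,
    ) -> np.ndarray:
        # Keep only the frames at which the ego passes within radius_m of the box.
        distances = np.linalg.norm(ego_xyz - box_center[None, :], axis=1)
        return valid_timestamps[distances <= radius_m]
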
From 5c95ecbd4ba65b6f2524ccb46355c41c88df8b70 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 30 Sep 2025 13:27:04 +0800
Subject: [PATCH 051/145] merge dev_v0.0.7 into kitti360
---
d123/datasets/av2/av2_data_converter.py | 11 -----------
d123/datasets/carla/carla_data_converter.py | 8 --------
d123/datasets/nuplan/nuplan_data_converter.py | 13 -------------
d123/datatypes/scene/arrow/arrow_scene.py | 11 -----------
4 files changed, 43 deletions(-)
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index 59f306e0..f066aa42 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -10,17 +10,6 @@
import pandas as pd
import pyarrow as pa
-<<<<<<< HEAD:d123/dataset/dataset_specific/av2/av2_data_converter.py
-from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import (
- get_av2_ford_fusion_hybrid_parameters,
- rear_axle_se3_to_center_se3,
-)
-=======
->>>>>>> dev_v0.0.7:d123/datasets/av2/av2_data_converter.py
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.datasets.av2.av2_constants import (
AV2_CAMERA_TYPE_MAPPING,
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
index 80525baf..bcf8342c 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -11,14 +11,6 @@
import numpy as np
import pyarrow as pa
-<<<<<<< HEAD:d123/dataset/dataset_specific/carla/carla_data_converter.py
-from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import CarlaLidarIndex
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
-=======
->>>>>>> dev_v0.0.7:d123/datasets/carla/carla_data_converter.py
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
index e2d1cef5..398e536a 100644
--- a/d123/datasets/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -12,18 +12,6 @@
import yaml
from pyquaternion import Quaternion
-<<<<<<< HEAD:d123/dataset/dataset_specific/nuplan/nuplan_data_converter.py
-
-import d123.dataset.dataset_specific.nuplan.utils as nuplan_utils
-from d123.common.datatypes.detection.detection import TrafficLightStatus
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import NuplanLidarIndex
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import (
-=======
import d123.datasets.nuplan.utils as nuplan_utils
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
@@ -39,7 +27,6 @@
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import (
->>>>>>> dev_v0.0.7:d123/datasets/nuplan/nuplan_data_converter.py
get_nuplan_chrysler_pacifica_parameters,
rear_axle_se3_to_center_se3,
)
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index 45738893..e05b717c 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -4,23 +4,12 @@
import pyarrow as pa
-<<<<<<< HEAD:d123/dataset/scene/arrow_scene.py
-from d123.common.datatypes.detection.detection import BoxDetectionWrapper, TrafficLightDetectionWrapper
-from d123.common.datatypes.recording.detection_recording import DetectionRecording
-from d123.common.datatypes.sensor.camera import Camera, CameraMetadata, PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_from_json
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.common.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.dataset.arrow.conversion import (
-=======
from d123.common.utils.arrow_helper import open_arrow_table
from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
from d123.datatypes.maps.abstract_map import AbstractMap
from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.scene.arrow.utils.conversion import (
->>>>>>> dev_v0.0.7:d123/datatypes/scene/arrow/arrow_scene.py
get_box_detections_from_arrow_table,
get_camera_from_arrow_table,
get_ego_vehicle_state_from_arrow_table,
From 5bf2e5aa9c33bfda2d0f2b4a042164f991746d54 Mon Sep 17 00:00:00 2001
From: jbwang <1159270049@qq.com>
Date: Tue, 30 Sep 2025 14:47:20 +0800
Subject: [PATCH 052/145] merge dev_v0.0.7 into kitti360 and make kitti360
 compatible with existing code
---
d123/datasets/av2/av2_data_converter.py | 2 +-
d123/datasets/carla/carla_data_converter.py | 2 +-
.../kitti_360/kitti_360_data_converter.py | 35 ++++++++++---------
d123/datasets/kitti_360/kitti_360_helper.py | 15 +++++---
.../kitti_360/kitti_360_map_conversion.py | 8 ++---
d123/datasets/kitti_360/labels.py | 2 +-
d123/datasets/kitti_360/load_sensor.py | 2 +-
.../kitti_360/preprocess_detection.py | 4 +--
d123/datasets/nuplan/nuplan_data_converter.py | 2 +-
d123/datasets/wopd/wopd_data_converter.py | 2 +-
d123/datatypes/scene/arrow/arrow_scene.py | 2 +-
.../datatypes/scene/arrow/utils/conversion.py | 2 +-
.../default_dataset_conversion.yaml | 2 +-
.../config/datasets/kitti360_dataset.yaml | 4 +--
14 files changed, 47 insertions(+), 37 deletions(-)
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index f066aa42..7d2e3525 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -25,7 +25,7 @@
from d123.datasets.av2.av2_map_conversion import convert_av2_map
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
index bcf8342c..f5b8fd16 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -19,7 +19,7 @@
from d123.datatypes.maps.abstract_map_objects import AbstractLane
from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.sensors.lidar_index import CarlaLidarIndex
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index
diff --git a/d123/datasets/kitti_360/kitti_360_data_converter.py b/d123/datasets/kitti_360/kitti_360_data_converter.py
index 76396bbd..0616dcfa 100644
--- a/d123/datasets/kitti_360/kitti_360_data_converter.py
+++ b/d123/datasets/kitti_360/kitti_360_data_converter.py
@@ -22,20 +22,21 @@
from nuplan.planning.utils.multithreading.worker_utils import WorkerPool, worker_map
-from d123.common.datatypes.detection.detection_types import DetectionType
-from d123.common.datatypes.sensor.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.common.datatypes.sensor.lidar_index import Kitti360LidarIndex
-from d123.common.datatypes.time.time_point import TimePoint
-from d123.common.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
-from d123.common.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3
-from d123.dataset.arrow.helper import open_arrow_table, write_arrow_table
-from d123.dataset.dataset_specific.raw_data_converter import DataConverterConfig, RawDataConverter
-from d123.dataset.logs.log_metadata import LogMetadata
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
-from d123.dataset.dataset_specific.kitti_360.kitti_360_map_conversion import convert_kitti360_map
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, FisheyeMEICameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar_index import Kitti360LidarIndex
+from d123.datatypes.time.time_point import TimePoint
+from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
+from d123.datatypes.vehicle_state.vehicle_parameters import get_kitti360_station_wagon_parameters,rear_axle_se3_to_center_se3
+from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
+from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
+from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT,kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.datasets.kitti_360.kitti_360_map_conversion import convert_kitti360_map
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.rotation import EulerAngles
KITTI360_DT: Final[float] = 0.1
SORT_BY_TIMESTAMP: Final[bool] = True
@@ -482,13 +483,15 @@ def _extract_ego_state_all(log_name: str) -> Tuple[List[List[float]], List[int]]
R_mat_cali = R_mat @ KITTI3602NUPLAN_IMU_CALIBRATION[:3,:3]
yaw, pitch, roll = Quaternion(matrix=R_mat_cali[:3, :3]).yaw_pitch_roll
+ ego_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion
rear_axle_pose = StateSE3(
x=poses[pos, 4],
y=poses[pos, 8],
z=poses[pos, 12],
- roll=roll,
- pitch=pitch,
- yaw=yaw,
+ qw=ego_quaternion.qw,
+ qx=ego_quaternion.qx,
+ qy=ego_quaternion.qy,
+ qz=ego_quaternion.qz,
)
center = rear_axle_se3_to_center_se3(rear_axle_se3=rear_axle_pose, vehicle_parameters=vehicle_parameters)
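
The pose conversion above now goes rotation matrix to Euler angles to quaternion via the repo's `EulerAngles` helper. A sketch of the equivalent step with SciPy, under the assumption that `StateSE3` stores its quaternion in (qw, qx, qy, qz) order, as the field names above suggest:

    import numpy as np
    from scipy.spatial.transform import Rotation as R

    def rotation_matrix_to_wxyz(R_mat: np.ndarray) -> np.ndarray:
        # SciPy returns quaternions as (x, y, z, w); roll the scalar part to
        # the front to match the (qw, qx, qy, qz) fields used by StateSE3.
        qx, qy, qz, qw = R.from_matrix(R_mat).as_quat()
        return np.array([qw, qx, qy, qz])
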
diff --git a/d123/datasets/kitti_360/kitti_360_helper.py b/d123/datasets/kitti_360/kitti_360_helper.py
index e8520253..01c3d1fe 100644
--- a/d123/datasets/kitti_360/kitti_360_helper.py
+++ b/d123/datasets/kitti_360/kitti_360_helper.py
@@ -8,7 +8,8 @@
from d123.geometry import BoundingBoxSE3, StateSE3
from d123.geometry.polyline import Polyline3D
-from d123.dataset.dataset_specific.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.geometry.rotation import EulerAngles
+from d123.datasets.kitti_360.labels import kittiId2label,BBOX_LABLES_TO_DETECTION_NAME_DICT
import os
from pathlib import Path
@@ -124,6 +125,7 @@ def parse_scale_rotation(self):
Rm[0] = -Rm[0]
scale = np.diag(Sm)
yaw, pitch, roll = R.from_matrix(Rm).as_euler('zyx', degrees=False)
+ obj_quaternion = EulerAngles(roll=roll, pitch=pitch, yaw=yaw).quaternion
self.Rm = np.array(Rm)
self.Sm = np.array(Sm)
@@ -131,15 +133,20 @@ def parse_scale_rotation(self):
self.yaw = yaw
self.pitch = pitch
self.roll = roll
+ self.qw = obj_quaternion.qw
+ self.qx = obj_quaternion.qx
+ self.qy = obj_quaternion.qy
+ self.qz = obj_quaternion.qz
def get_state_array(self) -> np.ndarray:
center = StateSE3(
x=self.T[0],
y=self.T[1],
z=self.T[2],
- roll=self.roll,
- pitch=self.pitch,
- yaw=self.yaw,
+ qw=self.qw,
+ qx=self.qx,
+ qy=self.qy,
+ qz=self.qz,
)
scale = self.scale
bounding_box_se3 = BoundingBoxSE3(center, scale[0], scale[1], scale[2])
diff --git a/d123/datasets/kitti_360/kitti_360_map_conversion.py b/d123/datasets/kitti_360/kitti_360_map_conversion.py
index 924a7822..643a13c6 100644
--- a/d123/datasets/kitti_360/kitti_360_map_conversion.py
+++ b/d123/datasets/kitti_360/kitti_360_map_conversion.py
@@ -11,14 +11,14 @@
from shapely.geometry import LineString
import shapely.geometry as geom
-from d123.dataset.conversion.map.road_edge.road_edge_2d_utils import (
+from d123.datasets.utils.maps.road_edge.road_edge_2d_utils import (
get_road_edge_linear_rings,
split_line_geometry_by_max_length,
)
-from d123.dataset.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value
-from d123.dataset.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
+from d123.datatypes.maps.gpkg.utils import get_all_rows_with_value, get_row_with_value
+from d123.datatypes.maps.map_datatypes import MapLayer, RoadEdgeType, RoadLineType
from d123.geometry.polyline import Polyline3D
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D
+from d123.datasets.kitti_360.kitti_360_helper import KITTI360_MAP_Bbox3D
MAX_ROAD_EDGE_LENGTH = 100.0 # meters, used to filter out very long road edges
diff --git a/d123/datasets/kitti_360/labels.py b/d123/datasets/kitti_360/labels.py
index 6903be9f..45e2d315 100644
--- a/d123/datasets/kitti_360/labels.py
+++ b/d123/datasets/kitti_360/labels.py
@@ -167,7 +167,7 @@ def assureSingleInstanceName( name ):
# all good then
return name
-from d123.common.datatypes.detection.detection_types import DetectionType
+from d123.datatypes.detections.detection_types import DetectionType
BBOX_LABLES_TO_DETECTION_NAME_DICT = {
'car': 'car',
diff --git a/d123/datasets/kitti_360/load_sensor.py b/d123/datasets/kitti_360/load_sensor.py
index c4df6d36..7ca4489a 100644
--- a/d123/datasets/kitti_360/load_sensor.py
+++ b/d123/datasets/kitti_360/load_sensor.py
@@ -3,7 +3,7 @@
import numpy as np
import logging
-from d123.common.datatypes.sensor.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
def load_kitti360_lidar_from_path(filepath: Path, lidar_metadata: LiDARMetadata) -> LiDAR:
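
For context on what this loader consumes: KITTI-style Velodyne scans are flat float32 buffers of (x, y, z, reflectance) tuples. A sketch of the raw decoding step only; wrapping the points into the repo's `LiDAR` type is dataset code not shown here:

    import numpy as np
    from pathlib import Path

    def load_kitti360_points(filepath: Path) -> np.ndarray:
        # Each scan is a flat float32 buffer; reshape to (N, 4) columns of
        # x, y, z, reflectance.
        return np.fromfile(str(filepath), dtype=np.float32).reshape(-1, 4)
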
diff --git a/d123/datasets/kitti_360/preprocess_detection.py b/d123/datasets/kitti_360/preprocess_detection.py
index 97ea6eb8..92806736 100644
--- a/d123/datasets/kitti_360/preprocess_detection.py
+++ b/d123/datasets/kitti_360/preprocess_detection.py
@@ -31,8 +31,8 @@
PATH_3D_BBOX_ROOT = KITTI360_DATA_ROOT / DIR_3D_BBOX
PATH_POSES_ROOT = KITTI360_DATA_ROOT / DIR_POSES
-from d123.dataset.dataset_specific.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
-from d123.dataset.dataset_specific.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT
+from d123.datasets.kitti_360.kitti_360_helper import KITTI360Bbox3D, KITTI3602NUPLAN_IMU_CALIBRATION, get_lidar_extrinsic
+from d123.datasets.kitti_360.labels import KIITI360_DETECTION_NAME_DICT, kittiId2label, BBOX_LABLES_TO_DETECTION_NAME_DICT
def _bbox_xml_path(log_name: str) -> Path:
if log_name == "2013_05_28_drive_0004_sync":
diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
index 398e536a..980c53b1 100644
--- a/d123/datasets/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -21,7 +21,7 @@
from d123.datatypes.detections.detection import TrafficLightStatus
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.sensors.lidar_index import NuplanLidarIndex
from d123.datatypes.time.time_point import TimePoint
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index ebac241a..7a7aa7b1 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -18,7 +18,7 @@
from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
+from d123.datatypes.sensors.camera import PinholeCameraMetadata, CameraType, camera_metadata_dict_to_json
from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.sensors.lidar_index import WopdLidarIndex
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index e05b717c..0fc61ba8 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -69,7 +69,7 @@ def __init__(
) = _get_scene_data(arrow_file_path)
self._metadata: LogMetadata = _metadata
self._vehicle_parameters: VehicleParameters = _vehicle_parameters
- self._camera_metadata: Dict[CameraType, Union[PinholeCameraMetadata, FisheyeMEICameraMetadata]] = _camera_metadata
+ self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata
self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
self._map_api: Optional[AbstractMap] = None
diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py
index 1f6c879c..dfa63d54 100644
--- a/d123/datatypes/scene/arrow/utils/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/conversion.py
@@ -154,7 +154,7 @@ def get_lidar_from_arrow_table(
elif log_metadata.dataset == "wopd":
raise NotImplementedError
elif log_metadata.dataset == "kitti360":
- from d123.dataset.dataset_specific.kitti_360.load_sensor import load_kitti360_lidar_from_path
+ from d123.datasets.kitti_360.load_sensor import load_kitti360_lidar_from_path
lidar = load_kitti360_lidar_from_path(full_lidar_path, lidar_metadata)
else:
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 01084657..2c474fe8 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,7 +15,7 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
- - nuplan_private_dataset
+ # - nuplan_private_dataset
# - carla_dataset
# - wopd_dataset
# - av2_sensor_dataset
diff --git a/d123/script/config/datasets/kitti360_dataset.yaml b/d123/script/config/datasets/kitti360_dataset.yaml
index 17b9e863..c5816a29 100644
--- a/d123/script/config/datasets/kitti360_dataset.yaml
+++ b/d123/script/config/datasets/kitti360_dataset.yaml
@@ -1,12 +1,12 @@
kitti360_dataset:
- _target_: d123.dataset.dataset_specific.kitti_360.kitti_360_data_converter.Kitti360DataConverter
+ _target_: d123.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter
_convert_: 'all'
splits: ["kitti360"]
log_path: ${oc.env:KITTI360_DATA_ROOT}
data_converter_config:
- _target_: d123.dataset.dataset_specific.raw_data_converter.DataConverterConfig
+ _target_: d123.datasets.raw_data_converter.DataConverterConfig
_convert_: 'all'
output_path: ${d123_data_root}
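
For orientation, the `_target_` blocks above are resolved by Hydra at runtime. A minimal sketch of how such a config is instantiated, with placeholder paths standing in for the `${oc.env:KITTI360_DATA_ROOT}` and `${d123_data_root}` interpolations:

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.create(
        {
            "_target_": "d123.datasets.kitti_360.kitti_360_data_converter.Kitti360DataConverter",
            "_convert_": "all",
            "splits": ["kitti360"],
            "log_path": "/data/KITTI-360",  # placeholder for ${oc.env:KITTI360_DATA_ROOT}
            "data_converter_config": {
                "_target_": "d123.datasets.raw_data_converter.DataConverterConfig",
                "_convert_": "all",
                "output_path": "/data/d123",  # placeholder for ${d123_data_root}
            },
        }
    )
    converter = instantiate(cfg)  # builds the nested DataConverterConfig first
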
From 0ac18baa706ff3673e73a810df4d097e663d2749 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Thu, 2 Oct 2025 23:10:43 +0200
Subject: [PATCH 053/145] Massively improve viser speed and stability for
 cameras and bounding boxes. Partially clean up the viser code.
---
d123/common/utils/timer.py | 11 +-
d123/common/visualization/color/default.py | 10 ++
d123/common/visualization/viser/server.py | 150 ++++++++++++----
d123/common/visualization/viser/utils.py | 163 ++++++++++--------
.../datatypes/scene/arrow/utils/conversion.py | 13 +-
d123/geometry/se.py | 2 +-
d123/geometry/utils/bounding_box_utils.py | 40 +++++
.../default_dataset_conversion.yaml | 4 +-
.../config/datasets/av2_sensor_dataset.yaml | 4 +-
.../datasets/nuplan_private_dataset.yaml | 2 +-
notebooks/viz/viser_testing_v2_scene.ipynb | 43 +----
test_viser.py | 34 ++++
12 files changed, 313 insertions(+), 163 deletions(-)
create mode 100644 test_viser.py
diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py
index b2c57015..69137c0e 100644
--- a/d123/common/utils/timer.py
+++ b/d123/common/utils/timer.py
@@ -55,7 +55,7 @@ def end(self) -> None:
self._time_logs[self._end_key].append(time.perf_counter() - self._start_time)
- def stats(self, verbose: bool = True) -> Optional[pd.DataFrame]:
+ def to_pandas(self) -> Optional[pd.DataFrame]:
"""
Returns a DataFrame with statistics of the logged times.
:return: DataFrame of per-key timing statistics, or None if nothing was logged
@@ -71,8 +71,8 @@ def stats(self, verbose: bool = True) -> Optional[pd.DataFrame]:
statistics[key] = timings_statistics
dataframe = pd.DataFrame.from_dict(statistics).transpose()
- if verbose:
- print(dataframe.to_string())
+ # if verbose:
+ # print(dataframe.to_string())
return dataframe
@@ -91,3 +91,8 @@ def flush(self) -> None:
self._time_logs: Dict[str, List[float]] = {}
self._start_time: Optional[float] = None
self._iteration_time: Optional[float] = None
+
+ def __str__(self) -> str:
+ """String representation of the Timer."""
+ dataframe = self.to_pandas()
+ return dataframe.to_string() if dataframe is not None else "No timings logged"
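
Usage sketch for the reworked Timer API, assuming the default constructor: output is no longer printed inside the old stats(); the caller renders it explicitly via str(timer) or to_pandas().

    import time

    from d123.common.utils.timer import Timer

    timer = Timer()
    timer.start()
    time.sleep(0.01)
    timer.log("step A")
    time.sleep(0.02)
    timer.log("step B")
    timer.end()

    df = timer.to_pandas()  # DataFrame of per-key statistics, or None
    print(timer)            # __str__ renders the table, or "No timings logged"
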
diff --git a/d123/common/visualization/color/default.py b/d123/common/visualization/color/default.py
index 9eda3f7c..5d90977a 100644
--- a/d123/common/visualization/color/default.py
+++ b/d123/common/visualization/color/default.py
@@ -167,6 +167,16 @@
marker_style=None,
zorder=2,
),
+ DetectionType.EGO: PlotConfig(
+ fill_color=ELLIS_5[0],
+ fill_color_alpha=1.0,
+ line_color=BLACK,
+ line_color_alpha=1.0,
+ line_width=1.0,
+ line_style="-",
+ marker_style=HEADING_MARKER_STYLE,
+ zorder=4,
+ ),
}
EGO_VEHICLE_CONFIG: PlotConfig = PlotConfig(
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
index e5c95081..29e97418 100644
--- a/d123/common/visualization/viser/server.py
+++ b/d123/common/visualization/viser/server.py
@@ -1,10 +1,11 @@
import time
+from concurrent.futures import ThreadPoolExecutor
from typing import Dict, List, Literal
import numpy as np
-import trimesh
import viser
+from d123.common.utils.timer import Timer
from d123.common.visualization.viser.utils import (
get_bounding_box_meshes,
get_bounding_box_outlines,
@@ -43,15 +44,19 @@
# Cameras config:
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
+# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
+VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
-VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+# VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
+
+VISUALIZE_CAMERA_GUI: List[CameraType] = []
+
CAMERA_SCALE: float = 1.0
+RESIZE_FACTOR = 0.25
# Lidar config:
-LIDAR_AVAILABLE: bool = True
+LIDAR_AVAILABLE: bool = False
LIDAR_TYPES: List[LiDARType] = [
LiDARType.LIDAR_MERGED,
@@ -124,8 +129,8 @@ def set_scene(self, scene: AbstractScene) -> None:
gui_prev_frame = self.server.gui.add_button("Prev Frame", disabled=True)
gui_next_scene = self.server.gui.add_button("Next Scene", disabled=False)
gui_playing = self.server.gui.add_checkbox("Playing", True)
- gui_framerate = self.server.gui.add_slider("FPS", min=1, max=60, step=0.1, initial_value=10)
- gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60"))
+ gui_framerate = self.server.gui.add_slider("FPS", min=1, max=90, step=0.1, initial_value=10)
+ gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60", "90"))
# Frame step buttons.
@gui_next_frame.on_click
@@ -158,25 +163,26 @@ def _(_) -> None:
# Toggle frame visibility when the timestep slider changes.
@gui_timestep.on_update
def _(_) -> None:
- nonlocal current_frame_handle, current_frame_handle, prev_timestep
+ nonlocal prev_timestep, bounding_box_handle
current_timestep = gui_timestep.value
- start = time.time()
+ timer = Timer()
+ timer.start()
+
+ start = time.perf_counter()
# with self.server.atomic():
- mew_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False)
+
if BOUNDING_BOX_TYPE == "mesh":
- meshes = []
- for _, mesh in get_bounding_box_meshes(scene, gui_timestep.value).items():
- meshes.append(mesh)
- self.server.scene.add_mesh_trimesh(
- f"/frame{gui_timestep.value}/detections",
- trimesh.util.concatenate(meshes),
+ mesh = get_bounding_box_meshes(scene, gui_timestep.value)
+ new_bounding_box_handle = self.server.scene.add_mesh_trimesh(
+ "box_detections",
+ mesh=mesh,
visible=True,
)
elif BOUNDING_BOX_TYPE == "lines":
lines, colors = get_bounding_box_outlines(scene, gui_timestep.value)
- self.server.scene.add_line_segments(
- f"/frame{gui_timestep.value}/detections",
+ new_bounding_box_handle = self.server.scene.add_line_segments(
+ "box_detections",
points=lines,
colors=colors,
line_width=LINE_WIDTH,
@@ -184,44 +190,115 @@ def _(_) -> None:
else:
raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}")
- current_frame_handle.remove()
- current_frame_handle = mew_frame_handle
+ # bounding_box_handle.visible = False
+ # time.sleep(0.005)
+ # bounding_box_handle.remove()
+ bounding_box_handle = new_bounding_box_handle
+ new_bounding_box_handle.visible = True
+
+ timer.log("Update bounding boxes")
for camera_type in VISUALIZE_CAMERA_GUI:
camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
if camera is not None:
camera_gui_handles[camera_type].image = camera.image
- for camera_type in VISUALIZE_CAMERA_FRUSTUM:
+ camera_timer = Timer()
+ camera_timer.start()
+ import concurrent.futures
+
+ def load_camera_data(camera_type):
camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
if camera is not None:
- camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value)
- camera_frustum_handles[camera_type].position = camera_position.array
- camera_frustum_handles[camera_type].wxyz = camera_quaternion.q
- camera_frustum_handles[camera_type].image = camera.image
+ camera_position, camera_rotation, camera_image = get_camera_values(
+ scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
+ )
+ camera_frustum_handles[camera_type].position = camera_position
+ camera_frustum_handles[camera_type].wxyz = camera_rotation
+ camera_frustum_handles[camera_type].image = camera_image
+
+ return camera_type, None
+ return camera_type, None
+
+ with ThreadPoolExecutor(max_workers=len(VISUALIZE_CAMERA_FRUSTUM)) as executor:
+ future_to_camera = {
+ executor.submit(load_camera_data, camera_type): camera_type
+ for camera_type in VISUALIZE_CAMERA_FRUSTUM
+ }
+
+ for future in concurrent.futures.as_completed(future_to_camera):
+ camera_type, camera_data = future.result()
+
+ camera_timer.log("Load camera data")
+
+ # for camera_type in VISUALIZE_CAMERA_FRUSTUM:
+
+ # # camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
+ # # camera_timer.log("Get camera")
+
+ # if camera_type in camera_cache.keys():
+ # camera_position, camera_rotation, camera_image = camera_cache[camera_type]
+ # # camera_position, camera_quaternion, camera_image = get_camera_values(
+ # # scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
+ # # )
+
+ # # camera_timer.log("Get camera values")
+
+ # camera_frustum_handles[camera_type].position = camera_position
+ # camera_frustum_handles[camera_type].wxyz = camera_rotation
+ # camera_frustum_handles[camera_type].image = camera_image
+
+ camera_timer.log("Update camera frustum")
+ camera_timer.end()
+ # print(camera_timer) # 0.0082
+
+ timer.log("Update cameras")
if LIDAR_AVAILABLE:
try:
points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
except Exception as e:
- print(f"Error getting lidar points: {e}")
+ # print(f"Error getting lidar points: {e}")
points = np.zeros((0, 3))
colors = np.zeros((0, 3))
gui_lidar.points = points
gui_lidar.colors = colors
+ # timer.log("Update lidar")
+ timer.end()
+ # print(timer)
+
prev_timestep = current_timestep
- rendering_time = time.time() - start
+ rendering_time = time.perf_counter() - start
sleep_time = 1.0 / gui_framerate.value - rendering_time
- time.sleep(max(sleep_time, 0.0))
- self.server.flush() # Optional!
+ if sleep_time > 0:
+ time.sleep(max(sleep_time, 0.0))
+ # self.server.flush() # Optional!
+ # print(f"Render time: {rendering_time:.3f}s, sleep time: {sleep_time:.3f}s")
# Load in frames.
- current_frame_handle = self.server.scene.add_frame(f"/frame{gui_timestep.value}", show_axes=False)
self.server.scene.add_frame("/map", show_axes=False)
+ if BOUNDING_BOX_TYPE == "mesh":
+ mesh = get_bounding_box_meshes(scene, gui_timestep.value)
+ bounding_box_handle = self.server.scene.add_mesh_trimesh(
+ "box_detections",
+ mesh=mesh,
+ visible=True,
+ )
+ elif BOUNDING_BOX_TYPE == "lines":
+ lines, colors = get_bounding_box_outlines(scene, gui_timestep.value)
+ bounding_box_handle = self.server.scene.add_line_segments(
+ "box_detections",
+ points=lines,
+ colors=colors,
+ line_width=LINE_WIDTH,
+ )
+ else:
+ raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}")
+
camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {}
camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {}
@@ -232,28 +309,29 @@ def _(_) -> None:
camera_gui_handles[camera_type] = self.server.gui.add_image(
image=camera.image,
label=camera_type.serialize(),
- format="jpeg",
)
for camera_type in VISUALIZE_CAMERA_FRUSTUM:
camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
if camera is not None:
- camera_position, camera_quaternion = get_camera_values(scene, camera, gui_timestep.value)
+ camera_position, camera_quaternion, camera_image = get_camera_values(
+ scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
+ )
camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum(
f"camera_frustum_{camera_type.serialize()}",
fov=camera.metadata.fov_y,
aspect=camera.metadata.aspect_ratio,
scale=CAMERA_SCALE,
- image=camera.image,
- position=camera_position.array,
- wxyz=camera_quaternion.q,
+ image=camera_image,
+ position=camera_position,
+ wxyz=camera_quaternion,
)
if LIDAR_AVAILABLE:
try:
points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
except Exception as e:
- print(f"Error getting lidar points: {e}")
+ # print(f"Error getting lidar points: {e}")
points = np.zeros((0, 3))
colors = np.zeros((0, 3))
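
The playback loop above paces itself by subtracting the measured render time from the frame budget instead of sleeping a fixed interval. A standalone sketch of that pattern:

    import time
    from typing import Callable

    def paced_playback(render_frame: Callable[[int], None], fps: float, num_frames: int) -> None:
        for i in range(num_frames):
            start = time.perf_counter()
            render_frame(i)
            rendering_time = time.perf_counter() - start
            # Only sleep for whatever is left of the 1/fps frame budget.
            sleep_time = 1.0 / fps - rendering_time
            if sleep_time > 0:
                time.sleep(sleep_time)
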
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
index 75d72b91..74615d46 100644
--- a/d123/common/visualization/viser/utils.py
+++ b/d123/common/visualization/viser/utils.py
@@ -1,26 +1,31 @@
from typing import Final, List, Optional, Tuple
+import cv2
import numpy as np
import numpy.typing as npt
import trimesh
-from pyquaternion import Quaternion # TODO: remove
from d123.common.visualization.color.color import TAB_10, Color
-from d123.common.visualization.color.config import PlotConfig
-from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, EGO_VEHICLE_CONFIG, MAP_SURFACE_CONFIG
+from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, MAP_SURFACE_CONFIG
+from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.maps.abstract_map import MapLayer
from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.sensors.camera import Camera, CameraType
from d123.datatypes.sensors.lidar import LiDARType
-from d123.geometry import BoundingBoxSE3, Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index
+from d123.geometry import Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index
from d123.geometry.geometry_index import BoundingBoxSE3Index
from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
+from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
+from d123.geometry.utils.bounding_box_utils import (
+ bbse3_array_to_corners_array,
+ corners_array_to_3d_mesh,
+)
# TODO: Refactor this file.
# TODO: Add general utilities for 3D primitives and mesh support.
-MAP_RADIUS: Final[float] = 500
+MAP_RADIUS: Final[float] = 200
BRIGHTNESS_FACTOR: Final[float] = 1.0
@@ -42,52 +47,43 @@ def configure_trimesh(mesh: trimesh.Trimesh, color: Color):
return mesh
-def bounding_box_to_trimesh(bbox: BoundingBoxSE3, plot_config: PlotConfig) -> trimesh.Trimesh:
+def get_bounding_box_meshes(scene: AbstractScene, iteration: int):
- # Create a unit box centered at origin
- box_mesh = trimesh.creation.box(extents=[bbox.length, bbox.width, bbox.height])
+ initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
- # Apply rotations in order: roll, pitch, yaw
- box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.yaw, [0, 0, 1]))
- box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.pitch, [0, 1, 0]))
- box_mesh = box_mesh.apply_transform(trimesh.transformations.rotation_matrix(bbox.center.roll, [1, 0, 0]))
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
- # Apply translation
- box_mesh = box_mesh.apply_translation([bbox.center.x, bbox.center.y, bbox.center.z])
+ # create meshes for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array)
- return configure_trimesh(box_mesh, plot_config.fill_color)
+ # Create colors for each box based on detection type
+ box_colors = []
+ for box_type in boxes_type:
+ box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba)
+ # Convert to numpy array and repeat for each vertex
+ box_colors = np.array(box_colors)
+ vertex_colors = np.repeat(box_colors, 8, axis=0) # 8 vertices per box
-def get_bounding_box_meshes(scene: AbstractScene, iteration: int):
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
+ # Create trimesh object
+ mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces)
+ mesh.visual.vertex_colors = vertex_colors
- ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
- box_detections = scene.get_box_detections_at_iteration(iteration)
- # traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration)
- # map_api = scene.map_api
+ return mesh
- output = {}
- for box_detection in box_detections:
- bbox: BoundingBoxSE3 = box_detection.bounding_box
- bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
- plot_config = BOX_DETECTION_CONFIG[box_detection.metadata.detection_type]
- trimesh_box = bounding_box_to_trimesh(bbox, plot_config)
- output[f"{box_detection.metadata.detection_type.serialize()}/{box_detection.metadata.track_token}"] = (
- trimesh_box
- )
-
- ego_bbox = ego_vehicle_state.bounding_box
- ego_bbox.array[BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
- trimesh_box = bounding_box_to_trimesh(ego_bbox, EGO_VEHICLE_CONFIG)
- output["ego"] = trimesh_box
- return output
+def _get_bounding_box_lines_from_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert corners_array.shape[-1] == len(Point3DIndex)
+ assert corners_array.shape[-2] == len(Corners3DIndex)
+ assert corners_array.ndim >= 2
-def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.float64]:
- """
- TODO: Vectorize this function and move to geometry module.
- """
- corners = bounding_box.corners_array
index_pairs = [
(Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
(Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
@@ -102,45 +98,50 @@ def _get_bounding_box_lines(bounding_box: BoundingBoxSE3) -> npt.NDArray[np.floa
(Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP),
(Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP),
]
- lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
- for i, (start_idx, end_idx) in enumerate(index_pairs):
- lines[i, 0] = corners[start_idx]
- lines[i, 1] = corners[end_idx]
+
+ # Handle both single box and batched cases
+ if corners_array.ndim == 2:
+ # Single box case: (8, 3)
+ lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
+ for i, (start_idx, end_idx) in enumerate(index_pairs):
+ lines[i, 0] = corners_array[start_idx]
+ lines[i, 1] = corners_array[end_idx]
+ else:
+ # Batched case: (..., 8, 3)
+ batch_shape = corners_array.shape[:-2]
+ lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
+ for i, (start_idx, end_idx) in enumerate(index_pairs):
+ lines[..., i, 0, :] = corners_array[..., start_idx, :]
+ lines[..., i, 1, :] = corners_array[..., end_idx, :]
+
return lines
def get_bounding_box_outlines(scene: AbstractScene, iteration: int):
initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- origin: StateSE3 = initial_ego_vehicle_state.center_se3
-
ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
box_detections = scene.get_box_detections_at_iteration(iteration)
- lines = []
- colors = []
- for box_detection in box_detections:
- bbox: BoundingBoxSE3 = box_detection.bounding_box_se3
- bbox_lines = _get_bounding_box_lines(bbox)
- bbox_lines[..., Point3DIndex.XYZ] = bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ]
- bbox_color = np.zeros(bbox_lines.shape, dtype=np.float32)
- bbox_color[..., :] = (
- BOX_DETECTION_CONFIG[box_detection.metadata.detection_type]
- .fill_color.set_brightness(BRIGHTNESS_FACTOR)
- .rgb_norm
- )
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
+
+ # Create lines for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_lines = _get_bounding_box_lines_from_array(box_corners_array)
- lines.append(bbox_lines)
- colors.append(bbox_color)
+ # Create colors for all boxes
+ box_colors = np.zeros(box_lines.shape, dtype=np.float32)
+ for i, box_type in enumerate(boxes_type):
+ box_colors[i, ...] = BOX_DETECTION_CONFIG[box_type].fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm
- ego_bbox_lines = _get_bounding_box_lines(ego_vehicle_state.bounding_box_se3)
- ego_bbox_lines[..., Point3DIndex.XYZ] = ego_bbox_lines[..., Point3DIndex.XYZ] - origin.array[StateSE3Index.XYZ]
- ego_bbox_color = np.zeros(ego_bbox_lines.shape, dtype=np.float32)
- ego_bbox_color[..., :] = EGO_VEHICLE_CONFIG.fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm
+ box_lines = box_lines.reshape(-1, *box_lines.shape[2:])
+ box_colors = box_colors.reshape(-1, *box_colors.shape[2:])
- lines.append(ego_bbox_lines)
- colors.append(ego_bbox_color)
- return np.concatenate(lines, axis=0), np.concatenate(colors, axis=0)
+ return box_lines, box_colors
def get_map_meshes(scene: AbstractScene):
@@ -275,25 +276,35 @@ def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, itera
return camera
-def get_camera_values(scene: AbstractScene, camera: Camera, iteration: int) -> Tuple[Point3D, Quaternion]:
+def get_camera_values(
+ scene: AbstractScene, camera: Camera, iteration: int, resize_factor: Optional[float] = None
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
+
initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d
rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3
rear_axle_array = rear_axle.array
rear_axle_array[:3] -= initial_point_3d.array
- rear_axle = StateSE3.from_array(rear_axle_array)
+ rear_axle = StateSE3.from_array(rear_axle_array, copy=False)
camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
+ camera_se3 = StateSE3.from_transformation_matrix(camera_to_ego)
- ego_transform = rear_axle.transformation_matrix
-
- camera_transform = ego_transform @ camera_to_ego
+ camera_se3_array = convert_relative_to_absolute_se3_array(origin=rear_axle, se3_array=camera_se3.array)
+ abs_camera_se3 = StateSE3.from_array(camera_se3_array, copy=False)
# Camera transformation in ego frame
- camera_position = Point3D(*camera_transform[:3, 3])
- camera_rotation = Quaternion(matrix=camera_transform[:3, :3])
+ camera_position = abs_camera_se3.point_3d.array
+ camera_rotation = abs_camera_se3.quaternion.array
+
+ camera_image = camera.image
+
+ if resize_factor is not None:
+ new_width = int(camera_image.shape[1] * resize_factor)
+ new_height = int(camera_image.shape[0] * resize_factor)
+ camera_image = cv2.resize(camera_image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
- return camera_position, camera_rotation
+ return camera_position, camera_rotation, camera_image
def get_lidar_points(
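
The RESIZE_FACTOR path above shrinks frustum images before they are shipped to the client; at 0.25 that is roughly a 16x reduction in pixels per frame. A sketch of the resize step in isolation:

    import cv2
    import numpy as np

    def resize_by_factor(image: np.ndarray, resize_factor: float) -> np.ndarray:
        # cv2.resize takes (width, height), i.e. shape[1] before shape[0].
        new_width = int(image.shape[1] * resize_factor)
        new_height = int(image.shape[0] * resize_factor)
        return cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
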
diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py
index 514d9f20..2b95f2fb 100644
--- a/d123/datatypes/scene/arrow/utils/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/conversion.py
@@ -1,14 +1,13 @@
# TODO: rename this file and potentially move somewhere more appropriate.
-import io
import os
from pathlib import Path
from typing import Dict, List, Optional
+import cv2
import numpy as np
import numpy.typing as npt
import pyarrow as pa
-from PIL import Image
from d123.datatypes.detections.detection import (
BoxDetection,
@@ -31,7 +30,7 @@
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
"carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
- # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor",
+ "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
}
@@ -111,11 +110,11 @@ def get_camera_from_arrow_table(
sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset]
full_image_path = sensor_root / table_data
assert full_image_path.exists(), f"Camera file not found: {full_image_path}"
- img = Image.open(full_image_path)
- img.load()
- image = np.asarray(img, dtype=np.uint8)
+ image = cv2.imread(str(full_image_path), cv2.IMREAD_COLOR)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
elif isinstance(table_data, bytes):
- image = np.array(Image.open(io.BytesIO(table_data)))
+ image = cv2.imdecode(np.frombuffer(table_data, np.uint8), cv2.IMREAD_UNCHANGED)
+ image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
else:
raise NotImplementedError("Only string file paths for camera data are supported.")
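
The switch from PIL to OpenCV above also changes the channel order: cv2 decodes to BGR, hence the explicit conversion. A self-contained sketch of the bytes path, using IMREAD_COLOR to guarantee a 3-channel result (the patch itself uses IMREAD_UNCHANGED):

    import cv2
    import numpy as np

    def decode_image_rgb(data: bytes) -> np.ndarray:
        # imdecode reads the encoded buffer (JPEG/PNG/...) into a BGR array.
        image = cv2.imdecode(np.frombuffer(data, np.uint8), cv2.IMREAD_COLOR)
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
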
diff --git a/d123/geometry/se.py b/d123/geometry/se.py
index 213dca2f..0efc8898 100644
--- a/d123/geometry/se.py
+++ b/d123/geometry/se.py
@@ -149,7 +149,7 @@ def from_transformation_matrix(cls, transformation_matrix: npt.NDArray[np.float6
assert transformation_matrix.ndim == 2
assert transformation_matrix.shape == (4, 4)
array = np.zeros(len(StateSE3Index), dtype=np.float64)
- array[StateSE3Index.XYZ] = transformation_matrix[:3, :3]
+ array[StateSE3Index.XYZ] = transformation_matrix[:3, 3]
array[StateSE3Index.QUATERNION] = Quaternion.from_rotation_matrix(transformation_matrix[:3, :3])
return StateSE3.from_array(array)
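
The one-character fix above is easy to miss: in a 4x4 homogeneous transform, [:3, :3] is the rotation block while [:3, 3] is the translation column. A tiny sketch of the decomposition:

    import numpy as np

    def split_transform(T: np.ndarray) -> tuple:
        assert T.shape == (4, 4)
        rotation = T[:3, :3]    # 3x3 rotation block
        translation = T[:3, 3]  # translation column, what the fix now assigns to XYZ
        return rotation, translation
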
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index 4d3a4cd0..74c506e1 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -1,3 +1,5 @@
+from typing import Tuple
+
import numpy as np
import numpy.typing as npt
import shapely
@@ -136,3 +138,41 @@ def bbse3_array_to_corners_array(bbse3_array: npt.NDArray[np.float64]) -> npt.ND
)
return corners_world.squeeze(axis=0) if ndim_one else corners_world
+
+
+def corners_array_to_3d_mesh(
+ corners_array: npt.NDArray[np.float64],
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.int32]]:
+
+ num_boxes = corners_array.shape[0]
+ vertices = corners_array.reshape(-1, 3)
+
+ # Define the faces for a single box using Corners3DIndex
+ faces_single = np.array(
+ [
+ # Bottom face
+ [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM],
+ [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM],
+ # Top face
+ [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_TOP],
+ [Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_TOP],
+ # Left face
+ [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP],
+ [Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP],
+ # Right face
+ [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM],
+ [Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_BOTTOM],
+ # Front face
+ [Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM],
+ [Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.FRONT_LEFT_BOTTOM],
+ # Back face
+ [Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM],
+ [Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.BACK_RIGHT_BOTTOM],
+ ],
+ dtype=np.int32,
+ )
+
+ # Offset the faces for each box
+ faces = np.vstack([faces_single + i * 8 for i in range(num_boxes)])
+
+ return vertices, faces
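
A quick sanity check of the face-offset batching in corners_array_to_3d_mesh: each box contributes len(Corners3DIndex) == 8 vertices, so box i's faces must index into the vertex slice [8*i, 8*i + 8). With stand-in triangles:

    import numpy as np

    corners = np.random.rand(3, 8, 3)                # 3 boxes, 8 corners, xyz
    vertices = corners.reshape(-1, 3)                # (24, 3) flattened vertices
    faces_single = np.array([[0, 1, 2], [0, 2, 3]])  # stand-in triangles per box
    faces = np.vstack([faces_single + i * 8 for i in range(corners.shape[0])])
    assert faces.max() < len(vertices)               # offsets stay in range
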
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index be97439e..0a4544da 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -15,10 +15,10 @@ defaults:
- default_dataset_paths
- _self_
- datasets:
- - nuplan_private_dataset
+ # - nuplan_private_dataset
# - carla_dataset
# - wopd_dataset
- # - av2_sensor_dataset
+ - av2_sensor_dataset
force_log_conversion: True
force_map_conversion: True
diff --git a/d123/script/config/datasets/av2_sensor_dataset.yaml b/d123/script/config/datasets/av2_sensor_dataset.yaml
index d567a175..d65947e4 100644
--- a/d123/script/config/datasets/av2_sensor_dataset.yaml
+++ b/d123/script/config/datasets/av2_sensor_dataset.yaml
@@ -3,7 +3,7 @@ av2_sensor_dataset:
_convert_: 'all'
splits: ["av2-sensor-mini_train"]
- log_path: "/mnt/elements_0/argoverse"
+ log_path: "/media/nvme1/argoverse"
data_converter_config:
_target_: d123.datasets.raw_data_converter.DataConverterConfig
@@ -13,4 +13,4 @@ av2_sensor_dataset:
force_log_conversion: ${force_log_conversion}
force_map_conversion: ${force_map_conversion}
camera_store_option: "path"
- lidar_store_option: null
+ lidar_store_option: "path"
diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml
index 7062f38f..af6f16ba 100644
--- a/d123/script/config/datasets/nuplan_private_dataset.yaml
+++ b/d123/script/config/datasets/nuplan_private_dataset.yaml
@@ -12,5 +12,5 @@ nuplan_private_dataset:
output_path: ${d123_data_root}
force_log_conversion: ${force_log_conversion}
force_map_conversion: ${force_map_conversion}
- camera_store_option: "path"
+ camera_store_option: "binary"
lidar_store_option: "path"
diff --git a/notebooks/viz/viser_testing_v2_scene.ipynb b/notebooks/viz/viser_testing_v2_scene.ipynb
index f2371058..e3dc780f 100644
--- a/notebooks/viz/viser_testing_v2_scene.ipynb
+++ b/notebooks/viz/viser_testing_v2_scene.ipynb
@@ -7,11 +7,11 @@
"metadata": {},
"outputs": [],
"source": [
- "from d123.dataset.scene.scene_builder import ArrowSceneBuilder\n",
- "from d123.dataset.scene.scene_filter import SceneFilter\n",
+ "from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder\n",
+ "from d123.datatypes.scene.scene_filter import SceneFilter\n",
"\n",
"from d123.common.multithreading.worker_sequential import Sequential\n",
- "from d123.common.datatypes.sensor.camera import CameraType"
+ "from d123.datatypes.sensors.camera import CameraType"
]
},
{
@@ -21,16 +21,11 @@
"metadata": {},
"outputs": [],
"source": [
- "import requests\n",
- "from PIL import Image\n",
- "from io import BytesIO\n",
"\n",
- "\n",
- "\n",
- "# splits = [\"nuplan_private_test\"]\n",
+ "splits = [\"nuplan_private_test\"]\n",
"# splits = [\"carla\"]\n",
"# splits = [\"wopd_train\"]\n",
- "splits = [\"av2-sensor-mini_train\"]\n",
+ "# splits = [\"av2-sensor-mini_train\"]\n",
"log_names = None\n",
"\n",
"scene_tokens = None\n",
@@ -42,7 +37,7 @@
" duration_s=10,\n",
" history_s=0.0,\n",
" timestamp_threshold_s=10,\n",
- " shuffle=False,\n",
+ " shuffle=True,\n",
" camera_types=[CameraType.CAM_F0],\n",
")\n",
"scene_builder = ArrowSceneBuilder(\"/home/daniel/d123_workspace/data\")\n",
@@ -59,14 +54,6 @@
"id": "2",
"metadata": {},
"outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "3",
- "metadata": {},
- "outputs": [],
"source": [
"from d123.common.visualization.viser.server import ViserVisualizationServer\n",
"\n",
@@ -74,24 +61,10 @@
"visualization_server = ViserVisualizationServer(scenes, scene_index=0)"
]
},
- {
- "cell_type": "markdown",
- "id": "4",
- "metadata": {},
- "source": []
- },
{
"cell_type": "code",
"execution_count": null,
- "id": "5",
- "metadata": {},
- "outputs": [],
- "source": []
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6",
+ "id": "3",
"metadata": {},
"outputs": [],
"source": []
@@ -99,7 +72,7 @@
],
"metadata": {
"kernelspec": {
- "display_name": "d123_dev",
+ "display_name": "d123",
"language": "python",
"name": "python3"
},
diff --git a/test_viser.py b/test_viser.py
new file mode 100644
index 00000000..072c835e
--- /dev/null
+++ b/test_viser.py
@@ -0,0 +1,34 @@
+from d123.common.multithreading.worker_sequential import Sequential
+from d123.common.visualization.viser.server import ViserVisualizationServer
+from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
+from d123.datatypes.scene.scene_filter import SceneFilter
+from d123.datatypes.sensors.camera import CameraType
+
+if __name__ == "__main__":
+
+ splits = ["nuplan_private_test"]
+ # splits = ["carla"]
+ # splits = ["wopd_train"]
+ # splits = ["av2-sensor-mini_train"]
+ log_names = None
+
+ scene_tokens = None
+
+ scene_filter = SceneFilter(
+ split_names=splits,
+ log_names=log_names,
+ scene_tokens=scene_tokens,
+ duration_s=10,
+ history_s=0.5,
+ timestamp_threshold_s=10,
+ shuffle=False,
+ camera_types=[CameraType.CAM_F0],
+ )
+ scene_builder = ArrowSceneBuilder("/home/daniel/d123_workspace/data")
+ worker = Sequential()
+ # worker = RayDistributed()
+ scenes = scene_builder.get_scenes(scene_filter, worker)
+
+ print(f"Found {len(scenes)} scenes")
+
+ visualization_server = ViserVisualizationServer(scenes, scene_index=0)
From fd74132a7cf794dfcec49b882931c37aaee3129b Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Fri, 3 Oct 2025 21:05:28 +0200
Subject: [PATCH 054/145] Refactor viser viewer, including speed improvements
(#39)
---
.../visualization/viser/elements/__init__.py | 7 +
.../viser/elements/detection_elements.py | 102 +++++
.../viser/elements/map_elements.py | 82 ++++
.../viser/elements/sensor_elements.py | 170 ++++++++
d123/common/visualization/viser/server.py | 386 ------------------
d123/common/visualization/viser/utils.py | 356 ----------------
.../visualization/viser/viser_config.py | 71 ++++
.../visualization/viser/viser_viewer.py | 245 +++++++++++
d123/datatypes/maps/gpkg/gpkg_map_objects.py | 3 +-
d123/datatypes/maps/gpkg/utils.py | 42 ++
d123/datatypes/scene/abstract_scene.py | 14 +-
d123/datatypes/scene/arrow/arrow_scene.py | 60 +--
d123/geometry/utils/bounding_box_utils.py | 40 +-
d123/script/run_viser.py | 4 +-
test_viser.py | 14 +-
15 files changed, 810 insertions(+), 786 deletions(-)
create mode 100644 d123/common/visualization/viser/elements/__init__.py
create mode 100644 d123/common/visualization/viser/elements/detection_elements.py
create mode 100644 d123/common/visualization/viser/elements/map_elements.py
create mode 100644 d123/common/visualization/viser/elements/sensor_elements.py
delete mode 100644 d123/common/visualization/viser/server.py
delete mode 100644 d123/common/visualization/viser/utils.py
create mode 100644 d123/common/visualization/viser/viser_config.py
create mode 100644 d123/common/visualization/viser/viser_viewer.py
diff --git a/d123/common/visualization/viser/elements/__init__.py b/d123/common/visualization/viser/elements/__init__.py
new file mode 100644
index 00000000..55c47327
--- /dev/null
+++ b/d123/common/visualization/viser/elements/__init__.py
@@ -0,0 +1,7 @@
+from d123.common.visualization.viser.elements.detection_elements import add_box_detections_to_viser_server
+from d123.common.visualization.viser.elements.map_elements import add_map_to_viser_server
+from d123.common.visualization.viser.elements.sensor_elements import (
+ add_camera_frustums_to_viser_server,
+ add_camera_gui_to_viser_server,
+ add_lidar_pc_to_viser_server,
+)
diff --git a/d123/common/visualization/viser/elements/detection_elements.py b/d123/common/visualization/viser/elements/detection_elements.py
new file mode 100644
index 00000000..cdb2b0c8
--- /dev/null
+++ b/d123/common/visualization/viser/elements/detection_elements.py
@@ -0,0 +1,102 @@
+from typing import Tuple
+
+import numpy as np
+import numpy.typing as npt
+import trimesh
+import viser
+
+from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
+from d123.common.visualization.viser.viser_config import ViserConfig
+from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+from d123.geometry.geometry_index import BoundingBoxSE3Index, Corners3DIndex, StateSE3Index
+from d123.geometry.utils.bounding_box_utils import (
+ bbse3_array_to_corners_array,
+ corners_array_to_3d_mesh,
+ corners_array_to_edge_lines,
+)
+
+
+def add_box_detections_to_viser_server(
+ scene: AbstractScene,
+    scene_iteration: int,
+ initial_ego_state: EgoStateSE3,
+ viser_server: viser.ViserServer,
+ viser_config: ViserConfig,
+) -> None:
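+    """Add the scene's box detections (plus the ego box) to the viser scene as a mesh or as line segments."""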
+ if viser_config.bounding_box_visible:
+ if viser_config.bounding_box_type == "mesh":
+            mesh = _get_bounding_box_meshes(scene, scene_iteration, initial_ego_state)
+ viser_server.scene.add_mesh_trimesh(
+ "box_detections",
+ mesh=mesh,
+ visible=True,
+ )
+ elif viser_config.bounding_box_type == "lines":
+            lines, colors = _get_bounding_box_outlines(scene, scene_iteration, initial_ego_state)
+ viser_server.scene.add_line_segments(
+ "box_detections",
+ points=lines,
+ colors=colors,
+ line_width=viser_config.bounding_box_line_width,
+ )
+ else:
+ raise ValueError(f"Unknown bounding box type: {viser_config.bounding_box_type}")
+
+
+def _get_bounding_box_meshes(scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3) -> trimesh.Trimesh:
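+    """Build a single merged trimesh of all box detections, recentered on the initial ego position."""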
+
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
+
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
+
+ # create meshes for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array)
+
+ # Create colors for each box based on detection type
+ box_colors = []
+ for box_type in boxes_type:
+ box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba)
+
+ # Convert to numpy array and repeat for each vertex
+ box_colors = np.array(box_colors)
+ vertex_colors = np.repeat(box_colors, len(Corners3DIndex), axis=0)
+
+ # Create trimesh object
+ mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces)
+ mesh.visual.vertex_colors = vertex_colors
+
+ return mesh
+
+
+def _get_bounding_box_outlines(
+    scene: AbstractScene, iteration: int, initial_ego_state: EgoStateSE3
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float32]]:
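+    """Build edge-line segments and matching per-segment colors for all box detections."""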
+
+ ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
+ box_detections = scene.get_box_detections_at_iteration(iteration)
+
+ # Load boxes to visualize, including ego vehicle at the last position
+ boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
+ boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
+
+ # Create lines for all boxes
+ box_se3_array = np.array([box.array for box in boxes])
+ box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_state.center_se3.array[StateSE3Index.XYZ]
+ box_corners_array = bbse3_array_to_corners_array(box_se3_array)
+ box_outlines = corners_array_to_edge_lines(box_corners_array)
+
+ # Create colors for all boxes
+ box_colors = np.zeros(box_outlines.shape, dtype=np.float32)
+ for i, box_type in enumerate(boxes_type):
+ box_colors[i, ...] = BOX_DETECTION_CONFIG[box_type].fill_color.rgb_norm
+
+ box_outlines = box_outlines.reshape(-1, *box_outlines.shape[2:])
+ box_colors = box_colors.reshape(-1, *box_colors.shape[2:])
+
+ return box_outlines, box_colors
diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py
new file mode 100644
index 00000000..90c247cb
--- /dev/null
+++ b/d123/common/visualization/viser/elements/map_elements.py
@@ -0,0 +1,82 @@
+from typing import Dict
+
+import trimesh
+import viser
+
+from d123.common.visualization.color.default import MAP_SURFACE_CONFIG
+from d123.common.visualization.viser.viser_config import ViserConfig
+from d123.datatypes.maps.abstract_map import MapLayer
+from d123.datatypes.maps.abstract_map_objects import AbstractSurfaceMapObject
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+from d123.geometry import Point3D, Point3DIndex
+
+
+def add_map_to_viser_server(
+ scene: AbstractScene,
+ initial_ego_state: EgoStateSE3,
+ viser_server: viser.ViserServer,
+ viser_config: ViserConfig,
+) -> None:
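+    """Add proximal map surfaces to the viser scene, one concatenated mesh per map layer."""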
+
+ if viser_config.map_visible:
+ for name, mesh in _get_map_trimesh_dict(scene, initial_ego_state, viser_config).items():
+ viser_server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True)
+
+
+def _get_map_trimesh_dict(
+ scene: AbstractScene,
+ initial_ego_state: EgoStateSE3,
+ viser_config: ViserConfig,
+) -> Dict[str, trimesh.Trimesh]:
+
+ # Unpack scene center for translation of map objects.
+ scene_center: Point3D = initial_ego_state.center.point_3d
+ scene_center_array = scene_center.array
+
+ # Load map objects within a certain radius around the scene center.
+ map_layers = [
+ MapLayer.LANE_GROUP,
+ MapLayer.INTERSECTION,
+ MapLayer.WALKWAY,
+ MapLayer.CROSSWALK,
+ MapLayer.CARPARK,
+ MapLayer.GENERIC_DRIVABLE,
+ ]
+ map_objects_dict = scene.map_api.get_proximal_map_objects(
+ scene_center.point_2d,
+ radius=viser_config.map_radius,
+ layers=map_layers,
+ )
+
+ # Create trimesh meshes for each map layer.
+ trimesh_dict = {}
+ for map_layer in map_objects_dict.keys():
+ surface_meshes = []
+ for map_surface in map_objects_dict[map_layer]:
+ map_surface: AbstractSurfaceMapObject
+
+ trimesh_mesh = map_surface.trimesh_mesh
+ trimesh_mesh.vertices -= scene_center_array
+
+ # Adjust height of non-road surfaces to avoid z-fighting in the visualization.
+ if map_layer in [
+ MapLayer.WALKWAY,
+ MapLayer.CROSSWALK,
+ MapLayer.CARPARK,
+ ]:
+ trimesh_mesh.vertices[..., Point3DIndex.Z] += viser_config.map_non_road_z_offset
+
+ # If the map does not have z-values, we place the surfaces on the ground level of the ego vehicle.
+ if not scene.log_metadata.map_has_z:
+ trimesh_mesh.vertices[..., Point3DIndex.Z] += (
+ scene_center.z - initial_ego_state.vehicle_parameters.height / 2
+ )
+
+ # Color the mesh based on the map layer type.
+ trimesh_mesh.visual.face_colors = MAP_SURFACE_CONFIG[map_layer].fill_color.rgba
+ surface_meshes.append(trimesh_mesh)
+
+ trimesh_dict[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes)
+
+ return trimesh_dict
diff --git a/d123/common/visualization/viser/elements/sensor_elements.py b/d123/common/visualization/viser/elements/sensor_elements.py
new file mode 100644
index 00000000..ee9b57bf
--- /dev/null
+++ b/d123/common/visualization/viser/elements/sensor_elements.py
@@ -0,0 +1,170 @@
+import concurrent.futures
+from typing import Dict, List, Optional, Tuple
+
+import cv2
+import numpy as np
+import numpy.typing as npt
+import viser
+
+from d123.common.visualization.viser.viser_config import ViserConfig
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.sensors.camera import Camera, CameraType
+from d123.datatypes.sensors.lidar import LiDARType
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+from d123.geometry import StateSE3, StateSE3Index
+from d123.geometry.transform.transform_se3 import (
+ convert_relative_to_absolute_points_3d_array,
+ convert_relative_to_absolute_se3_array,
+)
+
+
+def add_camera_frustums_to_viser_server(
+ scene: AbstractScene,
+    scene_iteration: int,
+ initial_ego_state: EgoStateSE3,
+ viser_server: viser.ViserServer,
+ viser_config: ViserConfig,
+ camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle],
+) -> None:
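+    """Add or update camera frustums with the current images; cameras are loaded in parallel threads."""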
+
+ if viser_config.camera_frustum_visible:
+ scene_center_array = initial_ego_state.center.point_3d.array
+        ego_pose = scene.get_ego_state_at_iteration(scene_iteration).rear_axle_se3.array
+ ego_pose[StateSE3Index.XYZ] -= scene_center_array
+
+ def _add_camera_frustums_to_viser_server(camera_type: CameraType) -> None:
+            camera = scene.get_camera_at_iteration(scene_iteration, camera_type)
+ if camera is not None:
+ camera_position, camera_quaternion, camera_image = _get_camera_values(
+ camera,
+ ego_pose.copy(),
+ viser_config.camera_frustum_image_scale,
+ )
+ if camera_type in camera_frustum_handles:
+ camera_frustum_handles[camera_type].position = camera_position
+ camera_frustum_handles[camera_type].wxyz = camera_quaternion
+ camera_frustum_handles[camera_type].image = camera_image
+ else:
+ camera_frustum_handles[camera_type] = viser_server.scene.add_camera_frustum(
+ f"camera_frustums/{camera_type.serialize()}",
+ fov=camera.metadata.fov_y,
+ aspect=camera.metadata.aspect_ratio,
+ scale=viser_config.camera_frustum_frustum_scale,
+ image=camera_image,
+ position=camera_position,
+ wxyz=camera_quaternion,
+ )
+
+ return None
+
+        # NOTE: To speed up adding camera frustums, we load cameras via multithreading and resize the images.
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(viser_config.camera_frustum_types)) as executor:
+ future_to_camera = {
+ executor.submit(_add_camera_frustums_to_viser_server, camera_type): camera_type
+ for camera_type in viser_config.camera_frustum_types
+ }
+ for future in concurrent.futures.as_completed(future_to_camera):
+ _ = future.result()
+
+ # TODO: Remove serial implementation, if not needed anymore.
+ # for camera_type in viser_config.camera_frustum_types:
+ # _add_camera_frustums_to_viser_server(camera_type)
+
+ return None
+
+
+def add_camera_gui_to_viser_server(
+ scene: AbstractScene,
+    scene_iteration: int,
+ viser_server: viser.ViserServer,
+ viser_config: ViserConfig,
+ camera_gui_handles: Dict[CameraType, viser.GuiImageHandle],
+) -> None:
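+    """Add or update downscaled camera images in the GUI sidebar."""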
+ if viser_config.camera_gui_visible:
+ for camera_type in viser_config.camera_gui_types:
+            camera = scene.get_camera_at_iteration(scene_iteration, camera_type)
+ if camera is not None:
+ if camera_type in camera_gui_handles:
+ camera_gui_handles[camera_type].image = _rescale_image(
+ camera.image, viser_config.camera_gui_image_scale
+ )
+ else:
+ with viser_server.gui.add_folder(f"Camera {camera_type.serialize()}"):
+ camera_gui_handles[camera_type] = viser_server.gui.add_image(
+ image=_rescale_image(camera.image, viser_config.camera_gui_image_scale),
+ label=camera_type.serialize(),
+ )
+
+
+def add_lidar_pc_to_viser_server(
+ scene: AbstractScene,
+    scene_iteration: int,
+ initial_ego_state: EgoStateSE3,
+ viser_server: viser.ViserServer,
+ viser_config: ViserConfig,
+ lidar_pc_handle: Optional[viser.PointCloudHandle],
+) -> Optional[viser.PointCloudHandle]:
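+    """Add or update the merged LiDAR point cloud and return its handle (None until first created)."""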
+ if viser_config.lidar_visible:
+
+ scene_center_array = initial_ego_state.center.point_3d.array
+        ego_pose = scene.get_ego_state_at_iteration(scene_iteration).rear_axle_se3.array
+ ego_pose[StateSE3Index.XYZ] -= scene_center_array
+
+ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]:
+            lidar = scene.get_lidar_at_iteration(scene_iteration, lidar_type)
+ if lidar is not None:
+ return lidar.xyz
+ else:
+ return np.zeros((0, 3), dtype=np.float32)
+
+ with concurrent.futures.ThreadPoolExecutor(max_workers=len(viser_config.lidar_types)) as executor:
+ future_to_lidar = {
+ executor.submit(_load_lidar_points, lidar_type): lidar_type for lidar_type in viser_config.lidar_types
+ }
+ lidar_points_3d_list: List[npt.NDArray[np.float32]] = []
+ for future in concurrent.futures.as_completed(future_to_lidar):
+ lidar_points_3d_list.append(future.result())
+
+ points_3d_local = (
+ np.concatenate(lidar_points_3d_list, axis=0) if lidar_points_3d_list else np.zeros((0, 3), dtype=np.float32)
+ )
+ points = convert_relative_to_absolute_points_3d_array(ego_pose, points_3d_local)
+ colors = np.zeros_like(points)
+
+ if lidar_pc_handle is not None:
+ lidar_pc_handle.points = points
+ lidar_pc_handle.colors = colors
+ else:
+ lidar_pc_handle = viser_server.scene.add_point_cloud(
+ "lidar_points",
+ points=points,
+ colors=colors,
+ point_size=viser_config.lidar_point_size,
+ point_shape=viser_config.lidar_point_shape,
+        )
+    return lidar_pc_handle
+
+
+def _get_camera_values(
+ camera: Camera,
+ ego_pose: npt.NDArray[np.float64],
+ resize_factor: Optional[float] = None,
+) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
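+    """Compute the absolute camera pose from the ego pose; returns position, wxyz quaternion, and image."""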
+ assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index)
+
+ rel_camera_pose = StateSE3.from_transformation_matrix(camera.extrinsic).array
+ abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose)
+
+ camera_position = abs_camera_pose[StateSE3Index.XYZ]
+ camera_rotation = abs_camera_pose[StateSE3Index.QUATERNION]
+
+ camera_image = _rescale_image(camera.image, resize_factor)
+ return camera_position, camera_rotation, camera_image
+
+
+def _rescale_image(image: npt.NDArray[np.uint8], scale: Optional[float]) -> npt.NDArray[np.uint8]:
+    if scale is None or scale == 1.0:
+        return image
+ new_width = int(image.shape[1] * scale)
+ new_height = int(image.shape[0] * scale)
+ downscaled_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
+ return downscaled_image
diff --git a/d123/common/visualization/viser/server.py b/d123/common/visualization/viser/server.py
deleted file mode 100644
index 29e97418..00000000
--- a/d123/common/visualization/viser/server.py
+++ /dev/null
@@ -1,386 +0,0 @@
-import time
-from concurrent.futures import ThreadPoolExecutor
-from typing import Dict, List, Literal
-
-import numpy as np
-import viser
-
-from d123.common.utils.timer import Timer
-from d123.common.visualization.viser.utils import (
- get_bounding_box_meshes,
- get_bounding_box_outlines,
- get_camera_if_available,
- get_camera_values,
- get_lidar_points,
- get_map_meshes,
-)
-from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.datatypes.sensors.camera import CameraType
-from d123.datatypes.sensors.lidar import LiDARType
-
-# TODO: Try to fix performance issues.
-# TODO: Refactor this file.
-
-all_camera_types: List[CameraType] = [
- CameraType.CAM_F0,
- CameraType.CAM_B0,
- CameraType.CAM_L0,
- CameraType.CAM_L1,
- CameraType.CAM_L2,
- CameraType.CAM_R0,
- CameraType.CAM_R1,
- CameraType.CAM_R2,
-]
-
-# MISC config:
-LINE_WIDTH: float = 4.0
-
-# Bounding box config:
-BOUNDING_BOX_TYPE: Literal["mesh", "lines"] = "mesh"
-
-# Map config:
-MAP_AVAILABLE: bool = True
-
-
-# Cameras config:
-
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_F0, CameraType.CAM_L0, CameraType.CAM_R0]
-VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = all_camera_types
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = [CameraType.CAM_STEREO_L, CameraType.CAM_STEREO_R]
-# VISUALIZE_CAMERA_FRUSTUM: List[CameraType] = []
-# VISUALIZE_CAMERA_GUI: List[CameraType] = [CameraType.CAM_F0]
-
-VISUALIZE_CAMERA_GUI: List[CameraType] = []
-
-CAMERA_SCALE: float = 1.0
-RESIZE_FACTOR = 0.25
-
-# Lidar config:
-LIDAR_AVAILABLE: bool = False
-
-LIDAR_TYPES: List[LiDARType] = [
- LiDARType.LIDAR_MERGED,
- LiDARType.LIDAR_TOP,
- LiDARType.LIDAR_FRONT,
- LiDARType.LIDAR_SIDE_LEFT,
- LiDARType.LIDAR_SIDE_RIGHT,
- LiDARType.LIDAR_BACK,
-]
-# LIDAR_TYPES: List[LiDARType] = [
-# LiDARType.LIDAR_TOP,
-# ]
-LIDAR_POINT_SIZE: float = 0.05
-
-
-class ViserVisualizationServer:
- def __init__(
- self,
- scenes: List[AbstractScene],
- scene_index: int = 0,
- host: str = "localhost",
- port: int = 8080,
- label: str = "D123 Viser Server",
- ):
- assert len(scenes) > 0, "At least one scene must be provided."
- self.scenes = scenes
- self.scene_index = scene_index
-
- self.host = host
- self.port = port
- self.label = label
-
- self.server = viser.ViserServer(host=self.host, port=self.port, label=self.label)
- self.set_scene(self.scenes[self.scene_index % len(self.scenes)])
-
- def next(self) -> None:
- self.server.flush()
- self.server.gui.reset()
- self.server.scene.reset()
- self.scene_index = (self.scene_index + 1) % len(self.scenes)
- print(f"Viser server started at {self.host}:{self.port}")
- self.set_scene(self.scenes[self.scene_index])
-
- def set_scene(self, scene: AbstractScene) -> None:
- num_frames = scene.get_number_of_iterations()
- # print(scene.available_camera_types)
-
- self.server.gui.configure_theme(dark_mode=False, control_width="large")
-
- # TODO: Fix lighting. Environment map can help, but cannot be freely configured.
- # self.server.scene.configure_environment_map(
- # hdri="warehouse",
- # background=False,
- # background_intensity=0.25,
- # environment_intensity=0.5,
- # )
-
- with self.server.gui.add_folder("Playback"):
- server_playing = True
-
- gui_timestep = self.server.gui.add_slider(
- "Timestep",
- min=0,
- max=num_frames - 1,
- step=1,
- initial_value=0,
- disabled=True,
- )
- gui_next_frame = self.server.gui.add_button("Next Frame", disabled=True)
- gui_prev_frame = self.server.gui.add_button("Prev Frame", disabled=True)
- gui_next_scene = self.server.gui.add_button("Next Scene", disabled=False)
- gui_playing = self.server.gui.add_checkbox("Playing", True)
- gui_framerate = self.server.gui.add_slider("FPS", min=1, max=90, step=0.1, initial_value=10)
- gui_framerate_options = self.server.gui.add_button_group("FPS options", ("10", "20", "30", "60", "90"))
-
- # Frame step buttons.
- @gui_next_frame.on_click
- def _(_) -> None:
- gui_timestep.value = (gui_timestep.value + 1) % num_frames
-
- @gui_prev_frame.on_click
- def _(_) -> None:
- gui_timestep.value = (gui_timestep.value - 1) % num_frames
-
- @gui_next_scene.on_click
- def _(_) -> None:
- nonlocal server_playing
- server_playing = False
-
- # Disable frame controls when we're playing.
- @gui_playing.on_update
- def _(_) -> None:
- gui_timestep.disabled = gui_playing.value
- gui_next_frame.disabled = gui_playing.value
- gui_prev_frame.disabled = gui_playing.value
-
- # Set the framerate when we click one of the options.
- @gui_framerate_options.on_click
- def _(_) -> None:
- gui_framerate.value = int(gui_framerate_options.value)
-
- prev_timestep = gui_timestep.value
-
- # Toggle frame visibility when the timestep slider changes.
- @gui_timestep.on_update
- def _(_) -> None:
- nonlocal prev_timestep, bounding_box_handle
- current_timestep = gui_timestep.value
-
- timer = Timer()
- timer.start()
-
- start = time.perf_counter()
- # with self.server.atomic():
-
- if BOUNDING_BOX_TYPE == "mesh":
- mesh = get_bounding_box_meshes(scene, gui_timestep.value)
- new_bounding_box_handle = self.server.scene.add_mesh_trimesh(
- "box_detections",
- mesh=mesh,
- visible=True,
- )
- elif BOUNDING_BOX_TYPE == "lines":
- lines, colors = get_bounding_box_outlines(scene, gui_timestep.value)
- new_bounding_box_handle = self.server.scene.add_line_segments(
- "box_detections",
- points=lines,
- colors=colors,
- line_width=LINE_WIDTH,
- )
- else:
- raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}")
-
- # bounding_box_handle.visible = False
- # time.sleep(0.005)
- # bounding_box_handle.remove()
- bounding_box_handle = new_bounding_box_handle
- new_bounding_box_handle.visible = True
-
- timer.log("Update bounding boxes")
-
- for camera_type in VISUALIZE_CAMERA_GUI:
- camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
- if camera is not None:
- camera_gui_handles[camera_type].image = camera.image
-
- camera_timer = Timer()
- camera_timer.start()
- import concurrent.futures
-
- def load_camera_data(camera_type):
- camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
- if camera is not None:
- camera_position, camera_rotation, camera_image = get_camera_values(
- scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
- )
- camera_frustum_handles[camera_type].position = camera_position
- camera_frustum_handles[camera_type].wxyz = camera_rotation
- camera_frustum_handles[camera_type].image = camera_image
-
- return camera_type, None
- return camera_type, None
-
- with ThreadPoolExecutor(max_workers=len(VISUALIZE_CAMERA_FRUSTUM)) as executor:
- future_to_camera = {
- executor.submit(load_camera_data, camera_type): camera_type
- for camera_type in VISUALIZE_CAMERA_FRUSTUM
- }
-
- for future in concurrent.futures.as_completed(future_to_camera):
- camera_type, camera_data = future.result()
-
- camera_timer.log("Load camera data")
-
- # for camera_type in VISUALIZE_CAMERA_FRUSTUM:
-
- # # camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
- # # camera_timer.log("Get camera")
-
- # if camera_type in camera_cache.keys():
- # camera_position, camera_rotation, camera_image = camera_cache[camera_type]
- # # camera_position, camera_quaternion, camera_image = get_camera_values(
- # # scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
- # # )
-
- # # camera_timer.log("Get camera values")
-
- # camera_frustum_handles[camera_type].position = camera_position
- # camera_frustum_handles[camera_type].wxyz = camera_rotation
- # camera_frustum_handles[camera_type].image = camera_image
-
- camera_timer.log("Update camera frustum")
- camera_timer.end()
- # print(camera_timer) # 0.0082
-
- timer.log("Update cameras")
-
- if LIDAR_AVAILABLE:
- try:
- points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
- except Exception as e:
- # print(f"Error getting lidar points: {e}")
- points = np.zeros((0, 3))
- colors = np.zeros((0, 3))
-
- gui_lidar.points = points
- gui_lidar.colors = colors
-
- # timer.log("Update lidar")
- timer.end()
- # print(timer)
-
- prev_timestep = current_timestep
-
- rendering_time = time.perf_counter() - start
- sleep_time = 1.0 / gui_framerate.value - rendering_time
- if sleep_time > 0:
- time.sleep(max(sleep_time, 0.0))
- # self.server.flush() # Optional!
- # print(f"Render time: {rendering_time:.3f}s, sleep time: {sleep_time:.3f}s")
-
- # Load in frames.
- self.server.scene.add_frame("/map", show_axes=False)
-
- if BOUNDING_BOX_TYPE == "mesh":
- mesh = get_bounding_box_meshes(scene, gui_timestep.value)
- bounding_box_handle = self.server.scene.add_mesh_trimesh(
- "box_detections",
- mesh=mesh,
- visible=True,
- )
- elif BOUNDING_BOX_TYPE == "lines":
- lines, colors = get_bounding_box_outlines(scene, gui_timestep.value)
- bounding_box_handle = self.server.scene.add_line_segments(
- "box_detections",
- points=lines,
- colors=colors,
- line_width=LINE_WIDTH,
- )
- else:
- raise ValueError(f"Unknown bounding box type: {BOUNDING_BOX_TYPE}")
-
- camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {}
- camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {}
-
- for camera_type in VISUALIZE_CAMERA_GUI:
- camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
- if camera is not None:
- with self.server.gui.add_folder(f"Camera {camera_type.serialize()}"):
- camera_gui_handles[camera_type] = self.server.gui.add_image(
- image=camera.image,
- label=camera_type.serialize(),
- )
-
- for camera_type in VISUALIZE_CAMERA_FRUSTUM:
- camera = get_camera_if_available(scene, camera_type, gui_timestep.value)
- if camera is not None:
- camera_position, camera_quaternion, camera_image = get_camera_values(
- scene, camera, gui_timestep.value, resize_factor=RESIZE_FACTOR
- )
- camera_frustum_handles[camera_type] = self.server.scene.add_camera_frustum(
- f"camera_frustum_{camera_type.serialize()}",
- fov=camera.metadata.fov_y,
- aspect=camera.metadata.aspect_ratio,
- scale=CAMERA_SCALE,
- image=camera_image,
- position=camera_position,
- wxyz=camera_quaternion,
- )
-
- if LIDAR_AVAILABLE:
- try:
- points, colors = get_lidar_points(scene, gui_timestep.value, LIDAR_TYPES)
- except Exception as e:
- # print(f"Error getting lidar points: {e}")
- points = np.zeros((0, 3))
- colors = np.zeros((0, 3))
-
- gui_lidar = self.server.scene.add_point_cloud(
- name="LiDAR",
- points=points,
- colors=colors,
- point_size=LIDAR_POINT_SIZE,
- point_shape="circle",
- )
-
- if MAP_AVAILABLE:
- for name, mesh in get_map_meshes(scene).items():
- self.server.scene.add_mesh_trimesh(f"/map/{name}", mesh, visible=True)
-
- # centerlines, __, __, road_edges = get_map_lines(scene)
- # for i, centerline in enumerate(centerlines):
- # self.server.scene.add_line_segments(
- # "/map/centerlines",
- # centerlines,
- # colors=[[BLACK.rgb]],
- # line_width=LINE_WIDTH,
- # )
- # self.server.scene.add_line_segments(
- # "/map/left_boundary",
- # left_boundaries,
- # colors=[[TAB_10[2].rgb]],
- # line_width=LINE_WIDTH,
- # )
- # self.server.scene.add_line_segments(
- # "/map/right_boundary",clear
- # right_boundaries,
- # colors=[[TAB_10[3].rgb]],
- # line_width=LINE_WIDTH,
- # )
- # print(centerlines.shape, road_edges.shape)
- # self.server.scene.add_line_segments(
- # "/map/road_edges",
- # road_edges,
- # colors=[[BLACK.rgb]],
- # line_width=LINE_WIDTH,
- # )
-
- # Playback update loop.
- prev_timestep = gui_timestep.value
- while server_playing:
- # Update the timestep if we're playing.
- if gui_playing.value:
- gui_timestep.value = (gui_timestep.value + 1) % num_frames
-
- self.server.flush()
- self.next()
diff --git a/d123/common/visualization/viser/utils.py b/d123/common/visualization/viser/utils.py
deleted file mode 100644
index 74615d46..00000000
--- a/d123/common/visualization/viser/utils.py
+++ /dev/null
@@ -1,356 +0,0 @@
-from typing import Final, List, Optional, Tuple
-
-import cv2
-import numpy as np
-import numpy.typing as npt
-import trimesh
-
-from d123.common.visualization.color.color import TAB_10, Color
-from d123.common.visualization.color.default import BOX_DETECTION_CONFIG, MAP_SURFACE_CONFIG
-from d123.datatypes.detections.detection_types import DetectionType
-from d123.datatypes.maps.abstract_map import MapLayer
-from d123.datatypes.maps.abstract_map_objects import AbstractLane, AbstractSurfaceMapObject
-from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.datatypes.sensors.camera import Camera, CameraType
-from d123.datatypes.sensors.lidar import LiDARType
-from d123.geometry import Corners3DIndex, Point3D, Point3DIndex, Polyline3D, StateSE3, StateSE3Index
-from d123.geometry.geometry_index import BoundingBoxSE3Index
-from d123.geometry.transform.transform_euler_se3 import convert_relative_to_absolute_points_3d_array
-from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
-from d123.geometry.utils.bounding_box_utils import (
- bbse3_array_to_corners_array,
- corners_array_to_3d_mesh,
-)
-
-# TODO: Refactor this file.
-# TODO: Add general utilities for 3D primitives and mesh support.
-
-MAP_RADIUS: Final[float] = 200
-BRIGHTNESS_FACTOR: Final[float] = 1.0
-
-
-def configure_trimesh(mesh: trimesh.Trimesh, color: Color):
- # base_color = [r / 255.0 for r in color.rgba]
- mesh.visual.face_colors = color.rgba
-
- # pbr_material = trimesh.visual.material.PBRMaterial(
- # baseColorFactor=base_color, # Your desired color (RGBA, 0-1 range)
- # metallicFactor=0.0, # 0.0 = non-metallic (more matte)
- # roughnessFactor=1.0, # 0.8 = quite rough (less shiny, 0=mirror, 1=completely rough)
- # emissiveFactor=[0.0, 0.0, 0.0], # No emission
- # alphaCutoff=0.9, # Alpha threshold for transparency
- # doubleSided=True, # Single-sided material
- # )
- # mesh.visual.material = pbr_material
- # mesh.visual = mesh.visual.to_texture()
-
- return mesh
-
-
-def get_bounding_box_meshes(scene: AbstractScene, iteration: int):
-
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
- box_detections = scene.get_box_detections_at_iteration(iteration)
-
- # Load boxes to visualize, including ego vehicle at the last position
- boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
- boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
-
- # create meshes for all boxes
- box_se3_array = np.array([box.array for box in boxes])
- box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
- box_corners_array = bbse3_array_to_corners_array(box_se3_array)
- box_vertices, box_faces = corners_array_to_3d_mesh(box_corners_array)
-
- # Create colors for each box based on detection type
- box_colors = []
- for box_type in boxes_type:
- box_colors.append(BOX_DETECTION_CONFIG[box_type].fill_color.rgba)
-
- # Convert to numpy array and repeat for each vertex
- box_colors = np.array(box_colors)
- vertex_colors = np.repeat(box_colors, 8, axis=0) # 8 vertices per box
-
- # Create trimesh object
- mesh = trimesh.Trimesh(vertices=box_vertices, faces=box_faces)
- mesh.visual.vertex_colors = vertex_colors
-
- return mesh
-
-
-def _get_bounding_box_lines_from_array(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
- assert corners_array.shape[-1] == len(Point3DIndex)
- assert corners_array.shape[-2] == len(Corners3DIndex)
- assert corners_array.ndim >= 2
-
- index_pairs = [
- (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
- (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
- (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM),
- (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM),
- (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP),
- (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP),
- (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP),
- (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP),
- (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP),
- (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP),
- (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP),
- (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP),
- ]
-
- # Handle both single box and batched cases
- if corners_array.ndim == 2:
- # Single box case: (8, 3)
- lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
- for i, (start_idx, end_idx) in enumerate(index_pairs):
- lines[i, 0] = corners_array[start_idx]
- lines[i, 1] = corners_array[end_idx]
- else:
- # Batched case: (..., 8, 3)
- batch_shape = corners_array.shape[:-2]
- lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
- for i, (start_idx, end_idx) in enumerate(index_pairs):
- lines[..., i, 0, :] = corners_array[..., start_idx, :]
- lines[..., i, 1, :] = corners_array[..., end_idx, :]
-
- return lines
-
-
-def get_bounding_box_outlines(scene: AbstractScene, iteration: int):
-
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
- box_detections = scene.get_box_detections_at_iteration(iteration)
-
- # Load boxes to visualize, including ego vehicle at the last position
- boxes = [bd.bounding_box_se3 for bd in box_detections.box_detections] + [ego_vehicle_state.bounding_box_se3]
- boxes_type = [bd.metadata.detection_type for bd in box_detections.box_detections] + [DetectionType.EGO]
-
- # Create lines for all boxes
- box_se3_array = np.array([box.array for box in boxes])
- box_se3_array[..., BoundingBoxSE3Index.XYZ] -= initial_ego_vehicle_state.center_se3.array[StateSE3Index.XYZ]
- box_corners_array = bbse3_array_to_corners_array(box_se3_array)
- box_lines = _get_bounding_box_lines_from_array(box_corners_array)
-
- # Create colors for all boxes
- box_colors = np.zeros(box_lines.shape, dtype=np.float32)
- for i, box_type in enumerate(boxes_type):
- box_colors[i, ...] = BOX_DETECTION_CONFIG[box_type].fill_color.set_brightness(BRIGHTNESS_FACTOR).rgb_norm
-
- box_lines = box_lines.reshape(-1, *box_lines.shape[2:])
- box_colors = box_colors.reshape(-1, *box_colors.shape[2:])
-
- return box_lines, box_colors
-
-
-def get_map_meshes(scene: AbstractScene):
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- center = initial_ego_vehicle_state.center_se3
- map_layers = [
- MapLayer.LANE_GROUP,
- MapLayer.LANE,
- MapLayer.WALKWAY,
- MapLayer.CROSSWALK,
- MapLayer.CARPARK,
- MapLayer.GENERIC_DRIVABLE,
- ]
-
- map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers)
- output = {}
-
- for map_layer in map_objects_dict.keys():
- surface_meshes = []
- for map_surface in map_objects_dict[map_layer]:
- map_surface: AbstractSurfaceMapObject
- trimesh_mesh = map_surface.trimesh_mesh
- if map_layer in [
- MapLayer.WALKWAY,
- MapLayer.CROSSWALK,
- MapLayer.GENERIC_DRIVABLE,
- MapLayer.CARPARK,
- ]:
- # Push meshes up by a few centimeters to avoid overlap with the ground in the visualization.
- trimesh_mesh.vertices -= Point3D(x=center.x, y=center.y, z=center.z - 0.1).array
- else:
- trimesh_mesh.vertices -= Point3D(x=center.x, y=center.y, z=center.z).array
-
- if not scene.log_metadata.map_has_z:
- trimesh_mesh.vertices += Point3D(
- x=0, y=0, z=center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2
- ).array
-
- trimesh_mesh = configure_trimesh(trimesh_mesh, MAP_SURFACE_CONFIG[map_layer].fill_color)
- surface_meshes.append(trimesh_mesh)
- output[f"{map_layer.serialize()}"] = trimesh.util.concatenate(surface_meshes)
- return output
-
-
-def get_map_lines(scene: AbstractScene):
- map_layers = [MapLayer.LANE, MapLayer.ROAD_EDGE]
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- center = initial_ego_vehicle_state.center_se3
- map_objects_dict = scene.map_api.get_proximal_map_objects(center.point_2d, radius=MAP_RADIUS, layers=map_layers)
-
- def polyline_to_segments(polyline: Polyline3D) -> npt.NDArray[np.float64]:
- polyline_array = polyline.array - center.point_3d.array
- polyline_array = polyline_array.reshape(-1, 1, 3)
- polyline_array = np.concatenate([polyline_array[:-1], polyline_array[1:]], axis=1)
- return polyline_array
-
- centerlines, right_boundaries, left_boundaries, road_edges = [], [], [], []
- for lane in map_objects_dict[MapLayer.LANE]:
- lane: AbstractLane
-
- centerlines.append(polyline_to_segments(lane.centerline))
- right_boundaries.append(polyline_to_segments(lane.right_boundary))
- left_boundaries.append(polyline_to_segments(lane.left_boundary))
-
- for road_edge in map_objects_dict[MapLayer.ROAD_EDGE]:
- road_edges.append(polyline_to_segments(road_edge.polyline_3d))
-
- centerlines = np.concatenate(centerlines, axis=0)
- left_boundaries = np.concatenate(left_boundaries, axis=0)
- right_boundaries = np.concatenate(right_boundaries, axis=0)
- road_edges = np.concatenate(road_edges, axis=0)
-
- if not scene.log_metadata.map_has_z:
- # If the map does not have a z-coordinate, we set it to the height of the ego vehicle.
- centerlines[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2
- left_boundaries[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2
- right_boundaries[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2
- road_edges[:, :, 2] += center.z - initial_ego_vehicle_state.vehicle_parameters.height / 2
-
- return centerlines, left_boundaries, right_boundaries, road_edges
-
-
-def get_trimesh_from_boundaries(
- left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 1.0
-) -> trimesh.Trimesh:
- resolution = 1.0 # [m]
-
- average_length = (left_boundary.length + right_boundary.length) / 2
- num_samples = int(average_length // resolution) + 1
- left_boundary_array = _interpolate_polyline(left_boundary, num_samples=num_samples)
- right_boundary_array = _interpolate_polyline(right_boundary, num_samples=num_samples)
- return _create_lane_mesh_from_boundary_arrays(left_boundary_array, right_boundary_array)
-
-
-def _interpolate_polyline(polyline_3d: Polyline3D, num_samples: int = 20) -> npt.NDArray[np.float64]:
- if num_samples < 2:
- num_samples = 2
- distances = np.linspace(0, polyline_3d.length, num=num_samples, endpoint=True, dtype=np.float64)
- return polyline_3d.interpolate(distances)
-
-
-def _create_lane_mesh_from_boundary_arrays(
- left_boundary_array: npt.NDArray[np.float64],
- right_boundary_array: npt.NDArray[np.float64],
-) -> trimesh.Trimesh:
-
- # Ensure both polylines have the same number of points
- if left_boundary_array.shape[0] != right_boundary_array.shape[0]:
- raise ValueError("Both polylines must have the same number of points")
-
- n_points = left_boundary_array.shape[0]
-
- # Combine vertices from both polylines
- vertices = np.vstack([left_boundary_array, right_boundary_array])
-
- # Create faces by connecting corresponding points on the two polylines
- faces = []
- for i in range(n_points - 1):
- faces.append([i, i + n_points, i + 1])
- faces.append([i + 1, i + n_points, i + n_points + 1])
-
- faces = np.array(faces)
- mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
- mesh.visual.face_colors = MAP_SURFACE_CONFIG[MapLayer.LANE].fill_color.rgba
- return mesh
-
-
-def get_camera_if_available(scene: AbstractScene, camera_type: CameraType, iteration: int) -> Optional[Camera]:
- camera: Optional[Camera] = None
- if camera_type in scene.available_camera_types:
- camera: Camera = scene.get_camera_at_iteration(iteration, camera_type)
- return camera
-
-
-def get_camera_values(
- scene: AbstractScene, camera: Camera, iteration: int, resize_factor: Optional[float] = None
-) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
-
- initial_point_3d = scene.get_ego_state_at_iteration(0).center_se3.point_3d
- rear_axle = scene.get_ego_state_at_iteration(iteration).rear_axle_se3
-
- rear_axle_array = rear_axle.array
- rear_axle_array[:3] -= initial_point_3d.array
- rear_axle = StateSE3.from_array(rear_axle_array, copy=False)
-
- camera_to_ego = camera.extrinsic # 4x4 transformation from camera to ego frame
- camera_se3 = StateSE3.from_transformation_matrix(camera_to_ego)
-
- camera_se3_array = convert_relative_to_absolute_se3_array(origin=rear_axle, se3_array=camera_se3.array)
- abs_camera_se3 = StateSE3.from_array(camera_se3_array, copy=False)
-
- # Camera transformation in ego frame
- camera_position = abs_camera_se3.point_3d.array
- camera_rotation = abs_camera_se3.quaternion.array
-
- camera_image = camera.image
-
- if resize_factor is not None:
- new_width = int(camera_image.shape[1] * resize_factor)
- new_height = int(camera_image.shape[0] * resize_factor)
- camera_image = cv2.resize(camera_image, (new_width, new_height), interpolation=cv2.INTER_LINEAR)
-
- return camera_position, camera_rotation, camera_image
-
-
-def get_lidar_points(
- scene: AbstractScene, iteration: int, lidar_types: List[LiDARType]
-) -> Tuple[npt.NDArray[np.float32], npt.NDArray[np.float32]]:
-
- initial_ego_vehicle_state = scene.get_ego_state_at_iteration(0)
- current_ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)
-
- def float_to_rgb(values: npt.NDArray[np.float32], cmap_name: str = "viridis") -> npt.NDArray[np.float32]:
- """
- Converts an array of float values to RGB colors using a matplotlib colormap.
- Normalizes values to [0, 1] using min-max scaling.
- Returns an array of shape (N, 3) with RGB values in [0, 1].
- """
- import matplotlib.pyplot as plt
-
- vmin = np.min(values)
- vmax = np.max(values)
- if vmax > vmin:
- normed = (values - vmin) / (vmax - vmin)
- else:
- normed = np.zeros_like(values)
- cmap = plt.get_cmap(cmap_name)
- rgb = cmap(normed)[:, :3] # Ignore alpha channel
- return rgb.astype(np.float32)
-
- points_ = []
- colors_ = []
- for lidar_idx, lidar_type in enumerate(lidar_types):
- if lidar_type not in scene.available_lidar_types:
- continue
- lidar = scene.get_lidar_at_iteration(iteration, lidar_type)
-
- # 1. convert points to ego frame
- points = lidar.xyz
-
- # 2. convert points to world frame
- origin = current_ego_vehicle_state.rear_axle_se3
- points = convert_relative_to_absolute_points_3d_array(origin, points)
- points = points - initial_ego_vehicle_state.center_se3.point_3d.array
- points_.append(points)
- colors_.append([TAB_10[lidar_idx % len(TAB_10)].rgb] * points.shape[0])
- # colors_.append(float_to_rgb(lidar.intensity, cmap_name="viridis"))
-
- points_ = np.concatenate(points_, axis=0) if points_ else np.empty((0, 3), dtype=np.float32)
- colors_ = np.concatenate(colors_, axis=0) if colors_ else np.empty((0, 3), dtype=np.float32)
-
- return points_, colors_
diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py
new file mode 100644
index 00000000..59d5256d
--- /dev/null
+++ b/d123/common/visualization/viser/viser_config.py
@@ -0,0 +1,71 @@
+from dataclasses import dataclass, field
+from typing import List, Literal, Optional, Tuple
+
+from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.lidar import LiDARType
+
+all_camera_types: List[CameraType] = [
+ CameraType.CAM_F0,
+ CameraType.CAM_B0,
+ CameraType.CAM_L0,
+ CameraType.CAM_L1,
+ CameraType.CAM_L2,
+ CameraType.CAM_R0,
+ CameraType.CAM_R1,
+ CameraType.CAM_R2,
+]
+
+all_lidar_types: List[LiDARType] = [
+ LiDARType.LIDAR_MERGED,
+ LiDARType.LIDAR_TOP,
+ LiDARType.LIDAR_FRONT,
+ LiDARType.LIDAR_SIDE_LEFT,
+ LiDARType.LIDAR_SIDE_RIGHT,
+ LiDARType.LIDAR_BACK,
+]
+
+
+@dataclass
+class ViserConfig:
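+    """Configuration for the viser viewer: server, theme, map, bounding boxes, cameras, and LiDAR."""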
+
+ # Server
+ server_host: str = "localhost"
+ server_port: int = 8080
+ server_label: str = "D123 Viser Server"
+ server_verbose: bool = True
+
+ # Theme
+ theme_control_layout: Literal["floating", "collapsible", "fixed"] = "floating"
+ theme_control_width: Literal["small", "medium", "large"] = "large"
+ theme_dark_mode: bool = False
+ theme_show_logo: bool = True
+ theme_show_share_button: bool = True
+ theme_brand_color: Optional[Tuple[int, int, int]] = None
+
+ # Map
+ map_visible: bool = True
+ map_radius: float = 1000.0 # [m]
+ map_non_road_z_offset: float = 0.0 # small translation to place crosswalks, parking, etc. on top of the road
+
+ # Bounding boxes
+ bounding_box_visible: bool = True
+ bounding_box_type: Literal["mesh", "lines"] = "mesh"
+ bounding_box_line_width: float = 4.0
+
+ # Cameras
+ # -> Frustum
+ camera_frustum_visible: bool = True
+ camera_frustum_types: List[CameraType] = field(default_factory=lambda: all_camera_types.copy())
+ camera_frustum_frustum_scale: float = 1.0
+ camera_frustum_image_scale: float = 0.25 # Resize factor for the camera image shown on the frustum (<1.0 for speed)
+
+ # -> GUI
+ camera_gui_visible: bool = True
+ camera_gui_types: List[CameraType] = field(default_factory=lambda: [CameraType.CAM_F0].copy())
+ camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed)
+
+ # LiDAR
+ lidar_visible: bool = True
+ lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy())
+ lidar_point_size: float = 0.05
+ lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle"
diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py
new file mode 100644
index 00000000..9774918f
--- /dev/null
+++ b/d123/common/visualization/viser/viser_viewer.py
@@ -0,0 +1,245 @@
+import logging
+import time
+from typing import Dict, List, Optional
+
+import viser
+from viser.theme import TitlebarButton, TitlebarConfig, TitlebarImage
+
+from d123.common.visualization.viser.elements import (
+ add_box_detections_to_viser_server,
+ add_camera_frustums_to_viser_server,
+ add_camera_gui_to_viser_server,
+ add_lidar_pc_to_viser_server,
+ add_map_to_viser_server,
+)
+from d123.common.visualization.viser.viser_config import ViserConfig
+from d123.datatypes.scene.abstract_scene import AbstractScene
+from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.lidar import LiDARType
+from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
+
+logger = logging.getLogger(__name__)
+
+
+all_camera_types: List[CameraType] = [
+ CameraType.CAM_F0,
+ CameraType.CAM_B0,
+ CameraType.CAM_L0,
+ CameraType.CAM_L1,
+ CameraType.CAM_L2,
+ CameraType.CAM_R0,
+ CameraType.CAM_R1,
+ CameraType.CAM_R2,
+]
+
+all_lidar_types: List[LiDARType] = [
+ LiDARType.LIDAR_MERGED,
+ LiDARType.LIDAR_TOP,
+ LiDARType.LIDAR_FRONT,
+ LiDARType.LIDAR_SIDE_LEFT,
+ LiDARType.LIDAR_SIDE_RIGHT,
+ LiDARType.LIDAR_BACK,
+]
+
+
+def _build_viser_server(viser_config: ViserConfig) -> viser.ViserServer:
+ server = viser.ViserServer(
+ host=viser_config.server_host,
+ port=viser_config.server_port,
+ label=viser_config.server_label,
+ verbose=viser_config.server_verbose,
+ )
+
+ # TODO: Fix links and logo.
+ buttons = (
+ TitlebarButton(
+ text="Getting Started",
+ icon=None,
+ href="https://nerf.studio",
+ ),
+ TitlebarButton(
+ text="Github",
+ icon="GitHub",
+ href="https://github.com/nerfstudio-project/nerfstudio",
+ ),
+ TitlebarButton(
+ text="Documentation",
+ icon="Description",
+ href="https://docs.nerf.studio",
+ ),
+ )
+ image = TitlebarImage(
+ image_url_light="https://docs.nerf.studio/_static/imgs/logo.png",
+ image_url_dark="https://docs.nerf.studio/_static/imgs/logo-dark.png",
+ image_alt="NerfStudio Logo",
+ href="https://docs.nerf.studio/",
+ )
+ titlebar_theme = TitlebarConfig(buttons=buttons, image=image)
+
+ server.gui.configure_theme(
+ titlebar_content=titlebar_theme,
+ control_layout=viser_config.theme_control_layout,
+ control_width=viser_config.theme_control_width,
+ dark_mode=viser_config.theme_dark_mode,
+ show_logo=viser_config.theme_show_logo,
+ show_share_button=viser_config.theme_show_share_button,
+ brand_color=viser_config.theme_brand_color,
+ )
+
+ return server
+
+
+class ViserViewer:
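+    """Interactive viser-based viewer that plays back a list of scenes and advances to the next scene on request."""
+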
+ def __init__(
+ self,
+ scenes: List[AbstractScene],
+ viser_config: ViserConfig = ViserConfig(),
+        scene_index: int = 0,
+ ) -> None:
+ assert len(scenes) > 0, "At least one scene must be provided."
+
+ self._scenes = scenes
+ self._viser_config = viser_config
+ self._scene_index = scene_index
+
+ self._viser_server = _build_viser_server(self._viser_config)
+ self.set_scene(self._scenes[self._scene_index % len(self._scenes)])
+
+ def next(self) -> None:
+ self._viser_server.flush()
+ self._viser_server.gui.reset()
+ self._viser_server.scene.reset()
+ self._scene_index = (self._scene_index + 1) % len(self._scenes)
+ self.set_scene(self._scenes[self._scene_index])
+
+ def set_scene(self, scene: AbstractScene) -> None:
+ num_frames = scene.get_number_of_iterations()
+ initial_ego_state: EgoStateSE3 = scene.get_ego_state_at_iteration(0)
+
+ with self._viser_server.gui.add_folder("Playback"):
+ server_playing = True
+ gui_timestep = self._viser_server.gui.add_slider(
+ "Timestep",
+ min=0,
+ max=num_frames - 1,
+ step=1,
+ initial_value=0,
+ disabled=True,
+ )
+ gui_next_frame = self._viser_server.gui.add_button("Next Frame", disabled=True)
+ gui_prev_frame = self._viser_server.gui.add_button("Prev Frame", disabled=True)
+ gui_next_scene = self._viser_server.gui.add_button("Next Scene", disabled=False)
+ gui_playing = self._viser_server.gui.add_checkbox("Playing", True)
+ gui_framerate = self._viser_server.gui.add_slider("FPS", min=1, max=100, step=1, initial_value=10)
+ gui_framerate_options = self._viser_server.gui.add_button_group(
+ "FPS options", ("10", "25", "50", "75", "100")
+ )
+
+ # Frame step buttons.
+ @gui_next_frame.on_click
+ def _(_) -> None:
+ gui_timestep.value = (gui_timestep.value + 1) % num_frames
+
+ @gui_prev_frame.on_click
+ def _(_) -> None:
+ gui_timestep.value = (gui_timestep.value - 1) % num_frames
+
+ @gui_next_scene.on_click
+ def _(_) -> None:
+ nonlocal server_playing
+ server_playing = False
+
+ # Disable frame controls when we're playing.
+ @gui_playing.on_update
+ def _(_) -> None:
+ gui_timestep.disabled = gui_playing.value
+ gui_next_frame.disabled = gui_playing.value
+ gui_prev_frame.disabled = gui_playing.value
+
+ # Set the framerate when we click one of the options.
+ @gui_framerate_options.on_click
+ def _(_) -> None:
+ gui_framerate.value = int(gui_framerate_options.value)
+
+ # Toggle frame visibility when the timestep slider changes.
+ @gui_timestep.on_update
+        def _(_) -> None:
+            nonlocal lidar_pc_handle
+            start = time.perf_counter()
+ add_box_detections_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ )
+ add_camera_frustums_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ camera_frustum_handles,
+ )
+ add_camera_gui_to_viser_server(
+ scene,
+ gui_timestep.value,
+ self._viser_server,
+ self._viser_config,
+ camera_gui_handles,
+ )
+            lidar_pc_handle = add_lidar_pc_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ lidar_pc_handle,
+ )
+ rendering_time = time.perf_counter() - start
+ sleep_time = 1.0 / gui_framerate.value - rendering_time
+ if sleep_time > 0:
+ time.sleep(max(sleep_time, 0.0))
+
+ camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {}
+ camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {}
+ lidar_pc_handle: Optional[viser.PointCloudHandle] = None
+
+ add_box_detections_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ )
+ add_camera_frustums_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ camera_frustum_handles,
+ )
+ add_camera_gui_to_viser_server(
+ scene,
+ gui_timestep.value,
+ self._viser_server,
+ self._viser_config,
+ camera_gui_handles,
+ )
+    lidar_pc_handle = add_lidar_pc_to_viser_server(
+ scene,
+ gui_timestep.value,
+ initial_ego_state,
+ self._viser_server,
+ self._viser_config,
+ lidar_pc_handle,
+ )
+ add_map_to_viser_server(scene, initial_ego_state, self._viser_server, self._viser_config)
+
+ # Playback update loop.
+ while server_playing:
+ if gui_playing.value:
+ gui_timestep.value = (gui_timestep.value + 1) % num_frames
+
+ self._viser_server.flush()
+ self.next()
diff --git a/d123/datatypes/maps/gpkg/gpkg_map_objects.py b/d123/datatypes/maps/gpkg/gpkg_map_objects.py
index 2326490b..ff44b0d6 100644
--- a/d123/datatypes/maps/gpkg/gpkg_map_objects.py
+++ b/d123/datatypes/maps/gpkg/gpkg_map_objects.py
@@ -10,7 +10,6 @@
import shapely.geometry as geom
import trimesh
-from d123.common.visualization.viser.utils import get_trimesh_from_boundaries
from d123.datatypes.maps.abstract_map_objects import (
AbstractCarpark,
AbstractCrosswalk,
@@ -24,7 +23,7 @@
AbstractSurfaceMapObject,
AbstractWalkway,
)
-from d123.datatypes.maps.gpkg.utils import get_row_with_value
+from d123.datatypes.maps.gpkg.utils import get_row_with_value, get_trimesh_from_boundaries
from d123.datatypes.maps.map_datatypes import RoadEdgeType, RoadLineType
from d123.geometry import Point3DIndex, Polyline3D
diff --git a/d123/datatypes/maps/gpkg/utils.py b/d123/datatypes/maps/gpkg/utils.py
index 8b359f75..a1d382f7 100644
--- a/d123/datatypes/maps/gpkg/utils.py
+++ b/d123/datatypes/maps/gpkg/utils.py
@@ -2,8 +2,12 @@
import geopandas as gpd
import numpy as np
+import numpy.typing as npt
+import trimesh
from shapely import wkt
+from d123.geometry.polyline import Polyline3D
+
def load_gdf_with_geometry_columns(gdf: gpd.GeoDataFrame, geometry_column_names: List[str] = []):
# TODO: refactor
@@ -46,3 +50,41 @@ def get_row_with_value(elements: gpd.geodataframe.GeoDataFrame, column_label: st
f"{len(matching_rows)} matching keys found. Expected to only find one." "Try using get_all_rows_with_value"
)
return matching_rows.iloc[0]
+
+
+def get_trimesh_from_boundaries(
+ left_boundary: Polyline3D, right_boundary: Polyline3D, resolution: float = 0.25
+) -> trimesh.Trimesh:
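+    """Triangulate the surface between two lane boundaries, resampled at the given resolution in meters."""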
+
+ def _interpolate_polyline(polyline_3d: Polyline3D, num_samples: int) -> npt.NDArray[np.float64]:
+ if num_samples < 2:
+ num_samples = 2
+ distances = np.linspace(0, polyline_3d.length, num=num_samples, endpoint=True, dtype=np.float64)
+ return polyline_3d.interpolate(distances)
+
+ average_length = (left_boundary.length + right_boundary.length) / 2
+ num_samples = int(average_length // resolution) + 1
+ left_boundary_array = _interpolate_polyline(left_boundary, num_samples)
+ right_boundary_array = _interpolate_polyline(right_boundary, num_samples)
+ return _create_lane_mesh_from_boundary_arrays(left_boundary_array, right_boundary_array)
+
+
+def _create_lane_mesh_from_boundary_arrays(
+ left_boundary_array: npt.NDArray[np.float64], right_boundary_array: npt.NDArray[np.float64]
+) -> trimesh.Trimesh:
+
+ # Ensure both polylines have the same number of points
+ if left_boundary_array.shape[0] != right_boundary_array.shape[0]:
+ raise ValueError("Both polylines must have the same number of points")
+
+ n_points = left_boundary_array.shape[0]
+ vertices = np.vstack([left_boundary_array, right_boundary_array])
+
+ faces = []
+ for i in range(n_points - 1):
+ faces.append([i, i + n_points, i + 1])
+ faces.append([i + 1, i + n_points, i + n_points + 1])
+
+ faces = np.array(faces)
+ mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
+ return mesh
diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py
index a3d2ccfe..af66541e 100644
--- a/d123/datatypes/scene/abstract_scene.py
+++ b/d123/datatypes/scene/abstract_scene.py
@@ -59,31 +59,31 @@ def get_timepoint_at_iteration(self, iteration: int) -> TimePoint:
raise NotImplementedError
@abc.abstractmethod
- def get_ego_state_at_iteration(self, iteration: int) -> EgoStateSE3:
+ def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]:
raise NotImplementedError
@abc.abstractmethod
- def get_box_detections_at_iteration(self, iteration: int) -> BoxDetectionWrapper:
+ def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]:
raise NotImplementedError
@abc.abstractmethod
- def get_traffic_light_detections_at_iteration(self, iteration: int) -> TrafficLightDetectionWrapper:
+ def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]:
raise NotImplementedError
@abc.abstractmethod
- def get_detection_recording_at_iteration(self, iteration: int) -> DetectionRecording:
+ def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]:
raise NotImplementedError
@abc.abstractmethod
- def get_route_lane_group_ids(self, iteration: int) -> List[int]:
+ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]:
raise NotImplementedError
@abc.abstractmethod
- def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Camera:
+ def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]:
raise NotImplementedError
@abc.abstractmethod
- def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> LiDAR:
+ def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]:
raise NotImplementedError
def open(self) -> None:
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index 0fc61ba8..8f61397a 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -132,52 +132,62 @@ def get_timepoint_at_iteration(self, iteration: int) -> TimePoint:
self._lazy_initialize()
return get_timepoint_from_arrow_table(self._recording_table, self._get_table_index(iteration))
- def get_ego_state_at_iteration(self, iteration: int) -> EgoStateSE3:
+ def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]:
self._lazy_initialize()
return get_ego_vehicle_state_from_arrow_table(
self._recording_table, self._get_table_index(iteration), self._vehicle_parameters
)
- def get_box_detections_at_iteration(self, iteration: int) -> BoxDetectionWrapper:
+ def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]:
+ # TODO: Make box detections optional in ArrowScene
self._lazy_initialize()
return get_box_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration))
- def get_traffic_light_detections_at_iteration(self, iteration: int) -> TrafficLightDetectionWrapper:
+ def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]:
+ # TODO: Make traffic lights optional in ArrowScene
self._lazy_initialize()
return get_traffic_light_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration))
- def get_detection_recording_at_iteration(self, iteration: int) -> DetectionRecording:
+ def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]:
+ # TODO: Make detection recording optional in ArrowScene
return DetectionRecording(
box_detections=self.get_box_detections_at_iteration(iteration),
traffic_light_detections=self.get_traffic_light_detections_at_iteration(iteration),
)
- def get_route_lane_group_ids(self, iteration: int) -> List[int]:
+ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]:
self._lazy_initialize()
- table_index = self._get_table_index(iteration)
- return self._recording_table["route_lane_group_ids"][table_index].as_py()
+ route_lane_group_ids: Optional[List[int]] = None
+ if "route_lane_group_ids" in self._recording_table.column_names:
+ table_index = self._get_table_index(iteration)
+ route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py()
+ return route_lane_group_ids
- def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Camera:
+ def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]:
self._lazy_initialize()
- assert camera_type in self._camera_metadata, f"Camera type {camera_type} not found in metadata."
- table_index = self._get_table_index(iteration)
- return get_camera_from_arrow_table(
- self._recording_table,
- table_index,
- self._camera_metadata[camera_type],
- self.log_metadata,
- )
+ camera: Optional[Camera] = None
+ if camera_type in self._camera_metadata:
+ table_index = self._get_table_index(iteration)
+ camera = get_camera_from_arrow_table(
+ self._recording_table,
+ table_index,
+ self._camera_metadata[camera_type],
+ self.log_metadata,
+ )
+ return camera
- def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> LiDAR:
+ def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]:
self._lazy_initialize()
- assert lidar_type in self._lidar_metadata, f"LiDAR type {lidar_type} not found in metadata."
- table_index = self._get_table_index(iteration)
- return get_lidar_from_arrow_table(
- self._recording_table,
- table_index,
- self._lidar_metadata[lidar_type],
- self.log_metadata,
- )
+ lidar: Optional[LiDAR] = None
+ if lidar_type in self._lidar_metadata:
+ table_index = self._get_table_index(iteration)
+ lidar = get_lidar_from_arrow_table(
+ self._recording_table,
+ table_index,
+ self._lidar_metadata[lidar_type],
+ self.log_metadata,
+ )
+ return lidar
def _lazy_initialize(self) -> None:
self.open()
diff --git a/d123/geometry/utils/bounding_box_utils.py b/d123/geometry/utils/bounding_box_utils.py
index 74c506e1..72f6e994 100644
--- a/d123/geometry/utils/bounding_box_utils.py
+++ b/d123/geometry/utils/bounding_box_utils.py
@@ -10,6 +10,7 @@
Corners2DIndex,
Corners3DIndex,
Point2DIndex,
+ Point3DIndex,
Vector2DIndex,
Vector3DIndex,
)
@@ -173,6 +174,43 @@ def corners_array_to_3d_mesh(
)
# Offset the faces for each box
- faces = np.vstack([faces_single + i * 8 for i in range(num_boxes)])
+ faces = np.vstack([faces_single + i * len(Corners3DIndex) for i in range(num_boxes)])
return vertices, faces
+
+
+def corners_array_to_edge_lines(corners_array: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert corners_array.shape[-1] == len(Point3DIndex)
+ assert corners_array.shape[-2] == len(Corners3DIndex)
+ assert corners_array.ndim >= 2
+
+ index_pairs = [
+ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_RIGHT_BOTTOM),
+ (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_BOTTOM),
+ (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_LEFT_BOTTOM),
+ (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_BOTTOM),
+ (Corners3DIndex.FRONT_LEFT_TOP, Corners3DIndex.FRONT_RIGHT_TOP),
+ (Corners3DIndex.FRONT_RIGHT_TOP, Corners3DIndex.BACK_RIGHT_TOP),
+ (Corners3DIndex.BACK_RIGHT_TOP, Corners3DIndex.BACK_LEFT_TOP),
+ (Corners3DIndex.BACK_LEFT_TOP, Corners3DIndex.FRONT_LEFT_TOP),
+ (Corners3DIndex.FRONT_LEFT_BOTTOM, Corners3DIndex.FRONT_LEFT_TOP),
+ (Corners3DIndex.FRONT_RIGHT_BOTTOM, Corners3DIndex.FRONT_RIGHT_TOP),
+ (Corners3DIndex.BACK_RIGHT_BOTTOM, Corners3DIndex.BACK_RIGHT_TOP),
+ (Corners3DIndex.BACK_LEFT_BOTTOM, Corners3DIndex.BACK_LEFT_TOP),
+ ]
+
+ if corners_array.ndim == 2:
+ # Single box case: (8, 3)
+ edge_lines = np.zeros((len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
+ for edge_idx, (start_idx, end_idx) in enumerate(index_pairs):
+ edge_lines[edge_idx, 0] = corners_array[start_idx]
+ edge_lines[edge_idx, 1] = corners_array[end_idx]
+ else:
+ # Batched case: (..., 8, 3)
+ batch_shape = corners_array.shape[:-2]
+ edge_lines = np.zeros(batch_shape + (len(index_pairs), 2, len(Point3DIndex)), dtype=np.float64)
+ for edge_idx, (start_idx, end_idx) in enumerate(index_pairs):
+ edge_lines[..., edge_idx, 0, :] = corners_array[..., start_idx, :]
+ edge_lines[..., edge_idx, 1, :] = corners_array[..., end_idx, :]
+
+ return edge_lines
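As a shape check for `corners_array_to_edge_lines`, a hedged sketch of its batched branch (pure NumPy; the two `index_pairs` entries stand in for the twelve `Corners3DIndex` pairs above):

    import numpy as np

    corners = np.random.rand(5, 8, 3)              # 5 boxes x 8 corners x xyz
    index_pairs = [(0, 1), (1, 2)]                 # illustrative subset of the 12 box edges
    edges = np.zeros((5, len(index_pairs), 2, 3))  # (..., n_edges, 2 endpoints, xyz)
    for e, (s, t) in enumerate(index_pairs):
        edges[..., e, 0, :] = corners[..., s, :]   # edge start corner
        edges[..., e, 1, :] = corners[..., t, :]   # edge end corner
    assert edges.shape == (5, 2, 2, 3)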
diff --git a/d123/script/run_viser.py b/d123/script/run_viser.py
index e682a96e..87c3218a 100644
--- a/d123/script/run_viser.py
+++ b/d123/script/run_viser.py
@@ -3,7 +3,7 @@
import hydra
from omegaconf import DictConfig
-from d123.common.visualization.viser.server import ViserVisualizationServer
+from d123.common.visualization.viser.viser_viewer import ViserViewer
from d123.script.builders.scene_builder_builder import build_scene_builder
from d123.script.builders.scene_filter_builder import build_scene_filter
from d123.script.run_dataset_conversion import build_worker
@@ -22,7 +22,7 @@ def main(cfg: DictConfig) -> None:
scene_builder = build_scene_builder(cfg.scene_builder)
scenes = scene_builder.get_scenes(scene_filter, worker=worker)
- ViserVisualizationServer(scenes=scenes)
+ ViserViewer(scenes=scenes)
if __name__ == "__main__":
diff --git a/test_viser.py b/test_viser.py
index 072c835e..9daa2d9c 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -1,15 +1,17 @@
+import os
+
from d123.common.multithreading.worker_sequential import Sequential
-from d123.common.visualization.viser.server import ViserVisualizationServer
+from d123.common.visualization.viser.viser_viewer import ViserViewer
from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
from d123.datatypes.scene.scene_filter import SceneFilter
from d123.datatypes.sensors.camera import CameraType
if __name__ == "__main__":
- splits = ["nuplan_private_test"]
+ # splits = ["nuplan_private_test"]
# splits = ["carla"]
# splits = ["wopd_train"]
- # splits = ["av2-sensor-mini_train"]
+ splits = ["av2-sensor-mini_train"]
log_names = None
scene_tokens = None
@@ -24,11 +26,9 @@
shuffle=False,
camera_types=[CameraType.CAM_F0],
)
- scene_builder = ArrowSceneBuilder("/home/daniel/d123_workspace/data")
+ scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"])
worker = Sequential()
# worker = RayDistributed()
scenes = scene_builder.get_scenes(scene_filter, worker)
-
print(f"Found {len(scenes)} scenes")
-
- visualization_server = ViserVisualizationServer(scenes, scene_index=0)
+ visualization_server = ViserViewer(scenes, scene_index=0)
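One note on the `os.environ["D123_DATA_ROOT"]` lookup introduced above: it raises a bare `KeyError` when the variable is unset. A hedged variant with a clearer failure message:

    import os

    data_root = os.environ.get("D123_DATA_ROOT")
    if data_root is None:
        raise RuntimeError("D123_DATA_ROOT is not set; point it at the d123 data directory")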
From 8cf4d8eff5ed9b24c285184fd57a7b6052fbbe72 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Sat, 4 Oct 2025 22:26:56 +0200
Subject: [PATCH 055/145] Several updates refactoring how sensors are stored.
---
d123/common/utils/mixin.py | 5 +
d123/common/utils/timer.py | 4 -
.../common/visualization/matplotlib/camera.py | 8 +-
.../viser/elements/sensor_elements.py | 16 +-
.../visualization/viser/viser_config.py | 28 +-
.../visualization/viser/viser_viewer.py | 26 +-
d123/datasets/av2/av2_constants.py | 20 +-
d123/datasets/av2/av2_data_converter.py | 61 ++--
d123/datasets/carla/carla_data_converter.py | 24 +-
d123/datasets/nuplan/load_sensor.py | 2 +-
d123/datasets/nuplan/nuplan_data_converter.py | 72 ++---
.../utils/sensor/camera_conventions.py | 85 ++++++
.../utils/sensor/lidar_index_registry.py} | 0
d123/datasets/wopd/wopd_data_converter.py | 94 +++----
d123/datatypes/scene/abstract_scene.py | 8 +-
d123/datatypes/scene/arrow/arrow_scene.py | 20 +-
.../datatypes/scene/arrow/utils/conversion.py | 17 +-
d123/datatypes/scene/scene_filter.py | 8 +-
d123/datatypes/sensors/__init__.py | 0
d123/datatypes/sensors/camera.py | 117 --------
d123/datatypes/sensors/camera/__init__.py | 0
.../sensors/camera/pinhole_camera.py | 260 ++++++++++++++++++
d123/datatypes/sensors/lidar/__init__.py | 0
d123/datatypes/sensors/{ => lidar}/lidar.py | 8 +-
d123/script/config/common/default_common.yaml | 3 +-
.../config/common/default_experiment.yaml | 2 +
.../default_dataset_conversion.yaml | 18 +-
d123/script/config/datasets/__init__.py | 0
.../datasets/nuplan_private_dataset.yaml | 2 +-
d123/script/run_dataset_conversion.py | 3 -
pyproject.toml | 2 +-
scripts/dataset/run_log_caching.sh | 2 -
test_viser.py | 13 +-
33 files changed, 590 insertions(+), 338 deletions(-)
create mode 100644 d123/datasets/utils/sensor/camera_conventions.py
rename d123/{datatypes/sensors/lidar_index.py => datasets/utils/sensor/lidar_index_registry.py} (100%)
create mode 100644 d123/datatypes/sensors/__init__.py
delete mode 100644 d123/datatypes/sensors/camera.py
create mode 100644 d123/datatypes/sensors/camera/__init__.py
create mode 100644 d123/datatypes/sensors/camera/pinhole_camera.py
create mode 100644 d123/datatypes/sensors/lidar/__init__.py
rename d123/datatypes/sensors/{ => lidar}/lidar.py (93%)
create mode 100644 d123/script/config/datasets/__init__.py
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
index 56038bb4..1e17db64 100644
--- a/d123/common/utils/mixin.py
+++ b/d123/common/utils/mixin.py
@@ -12,6 +12,11 @@ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> ArrayM
"""Create an instance from a NumPy array."""
raise NotImplementedError
+ @classmethod
+ def from_list(cls, values: list) -> ArrayMixin:
+ """Create an instance from a list of values."""
+ return cls.from_array(np.array(values, dtype=np.float64), copy=False)
+
@property
def array(self) -> npt.NDArray[np.float64]:
"""The array representation of the geometric entity."""
diff --git a/d123/common/utils/timer.py b/d123/common/utils/timer.py
index 69137c0e..17558977 100644
--- a/d123/common/utils/timer.py
+++ b/d123/common/utils/timer.py
@@ -58,7 +58,6 @@ def end(self) -> None:
def to_pandas(self) -> Optional[pd.DataFrame]:
"""
Returns a DataFrame with statistics of the logged times.
- :param verbose: whether to print the timings, defaults to True
:return: pandas dataframe.
"""
@@ -71,9 +70,6 @@ def to_pandas(self) -> Optional[pd.DataFrame]:
statistics[key] = timings_statistics
dataframe = pd.DataFrame.from_dict(statistics).transpose()
- # if verbose:
- # print(dataframe.to_string())
-
return dataframe
def info(self) -> Dict[str, float]:
diff --git a/d123/common/visualization/matplotlib/camera.py b/d123/common/visualization/matplotlib/camera.py
index bc33d0dd..49c567fb 100644
--- a/d123/common/visualization/matplotlib/camera.py
+++ b/d123/common/visualization/matplotlib/camera.py
@@ -13,7 +13,7 @@
from d123.common.visualization.color.default import BOX_DETECTION_CONFIG
from d123.datatypes.detections.detection import BoxDetectionSE3, BoxDetectionWrapper
from d123.datatypes.detections.detection_types import DetectionType
-from d123.datatypes.sensors.camera import Camera
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.geometry import BoundingBoxSE3Index, Corners3DIndex
from d123.geometry.transform.transform_euler_se3 import convert_absolute_to_relative_euler_se3_array
@@ -25,7 +25,7 @@
# from navsim.visualization.lidar import filter_lidar_pc, get_lidar_pc_color
-def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes:
+def add_camera_ax(ax: plt.Axes, camera: PinholeCamera) -> plt.Axes:
"""
Adds camera image to matplotlib ax object
:param ax: matplotlib ax object
@@ -70,7 +70,7 @@ def add_camera_ax(ax: plt.Axes, camera: Camera) -> plt.Axes:
def add_box_detections_to_camera_ax(
ax: plt.Axes,
- camera: Camera,
+ camera: PinholeCamera,
box_detections: BoxDetectionWrapper,
ego_state_se3: EgoStateSE3,
) -> plt.Axes:
@@ -115,7 +115,7 @@ def add_box_detections_to_camera_ax(
corners += detection_positions.reshape(-1, 1, 3)
# Then draw project corners to image.
- box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsic)
+ box_corners, corners_pc_in_fov = _transform_points_to_image(corners.reshape(-1, 3), camera.metadata.intrinsics)
box_corners = box_corners.reshape(-1, 8, 2)
corners_pc_in_fov = corners_pc_in_fov.reshape(-1, 8)
valid_corners = corners_pc_in_fov.any(-1)
diff --git a/d123/common/visualization/viser/elements/sensor_elements.py b/d123/common/visualization/viser/elements/sensor_elements.py
index ee9b57bf..0cc63621 100644
--- a/d123/common/visualization/viser/elements/sensor_elements.py
+++ b/d123/common/visualization/viser/elements/sensor_elements.py
@@ -8,10 +8,10 @@
from d123.common.visualization.viser.viser_config import ViserConfig
from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.datatypes.sensors.camera import Camera, CameraType
-from d123.datatypes.sensors.lidar import LiDARType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDARType
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.geometry import StateSE3, StateSE3Index
+from d123.geometry import StateSE3Index
from d123.geometry.transform.transform_se3 import (
convert_relative_to_absolute_points_3d_array,
convert_relative_to_absolute_se3_array,
@@ -24,7 +24,7 @@ def add_camera_frustums_to_viser_server(
initial_ego_state: EgoStateSE3,
viser_server: viser.ViserServer,
viser_config: ViserConfig,
- camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle],
+ camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle],
) -> None:
if viser_config.camera_frustum_visible:
@@ -32,7 +32,7 @@ def add_camera_frustums_to_viser_server(
ego_pose = scene.get_ego_state_at_iteration(scene_interation).rear_axle_se3.array
ego_pose[StateSE3Index.XYZ] -= scene_center_array
- def _add_camera_frustums_to_viser_server(camera_type: CameraType) -> None:
+ def _add_camera_frustums_to_viser_server(camera_type: PinholeCameraType) -> None:
camera = scene.get_camera_at_iteration(scene_interation, camera_type)
if camera is not None:
camera_position, camera_quaternion, camera_image = _get_camera_values(
@@ -78,7 +78,7 @@ def add_camera_gui_to_viser_server(
scene_interation: int,
viser_server: viser.ViserServer,
viser_config: ViserConfig,
- camera_gui_handles: Dict[CameraType, viser.GuiImageHandle],
+ camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle],
) -> None:
if viser_config.camera_gui_visible:
for camera_type in viser_config.camera_gui_types:
@@ -145,13 +145,13 @@ def _load_lidar_points(lidar_type: LiDARType) -> npt.NDArray[np.float32]:
def _get_camera_values(
- camera: Camera,
+ camera: PinholeCamera,
ego_pose: npt.NDArray[np.float64],
resize_factor: Optional[float] = None,
) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64], npt.NDArray[np.uint8]]:
assert ego_pose.ndim == 1 and len(ego_pose) == len(StateSE3Index)
- rel_camera_pose = StateSE3.from_transformation_matrix(camera.extrinsic).array
+ rel_camera_pose = camera.extrinsic.array
abs_camera_pose = convert_relative_to_absolute_se3_array(origin=ego_pose, se3_array=rel_camera_pose)
camera_position = abs_camera_pose[StateSE3Index.XYZ]
diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py
index 59d5256d..276d6762 100644
--- a/d123/common/visualization/viser/viser_config.py
+++ b/d123/common/visualization/viser/viser_config.py
@@ -1,18 +1,18 @@
from dataclasses import dataclass, field
from typing import List, Literal, Optional, Tuple
-from d123.datatypes.sensors.camera import CameraType
-from d123.datatypes.sensors.lidar import LiDARType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDARType
-all_camera_types: List[CameraType] = [
- CameraType.CAM_F0,
- CameraType.CAM_B0,
- CameraType.CAM_L0,
- CameraType.CAM_L1,
- CameraType.CAM_L2,
- CameraType.CAM_R0,
- CameraType.CAM_R1,
- CameraType.CAM_R2,
+all_camera_types: List[PinholeCameraType] = [
+ PinholeCameraType.CAM_F0,
+ PinholeCameraType.CAM_B0,
+ PinholeCameraType.CAM_L0,
+ PinholeCameraType.CAM_L1,
+ PinholeCameraType.CAM_L2,
+ PinholeCameraType.CAM_R0,
+ PinholeCameraType.CAM_R1,
+ PinholeCameraType.CAM_R2,
]
all_lidar_types: List[LiDARType] = [
@@ -44,7 +44,7 @@ class ViserConfig:
# Map
map_visible: bool = True
- map_radius: float = 1000.0 # [m]
+ map_radius: float = 200.0 # [m]
map_non_road_z_offset: float = 0.0 # small translation to place crosswalks, parking, etc. on top of the road
# Bounding boxes
@@ -55,13 +55,13 @@ class ViserConfig:
# Cameras
# -> Frustum
camera_frustum_visible: bool = True
- camera_frustum_types: List[CameraType] = field(default_factory=lambda: all_camera_types.copy())
+ camera_frustum_types: List[PinholeCameraType] = field(default_factory=lambda: all_camera_types.copy())
camera_frustum_frustum_scale: float = 1.0
camera_frustum_image_scale: float = 0.25 # Resize factor for the camera image shown on the frustum (<1.0 for speed)
# -> GUI
camera_gui_visible: bool = True
- camera_gui_types: List[CameraType] = field(default_factory=lambda: [CameraType.CAM_F0].copy())
+ camera_gui_types: List[PinholeCameraType] = field(default_factory=lambda: [PinholeCameraType.CAM_F0].copy())
camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed)
# LiDAR
diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py
index 9774918f..2bc44c3d 100644
--- a/d123/common/visualization/viser/viser_viewer.py
+++ b/d123/common/visualization/viser/viser_viewer.py
@@ -14,22 +14,22 @@
)
from d123.common.visualization.viser.viser_config import ViserConfig
from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.datatypes.sensors.camera import CameraType
-from d123.datatypes.sensors.lidar import LiDARType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDARType
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
logger = logging.getLogger(__name__)
-all_camera_types: List[CameraType] = [
- CameraType.CAM_F0,
- CameraType.CAM_B0,
- CameraType.CAM_L0,
- CameraType.CAM_L1,
- CameraType.CAM_L2,
- CameraType.CAM_R0,
- CameraType.CAM_R1,
- CameraType.CAM_R2,
+all_camera_types: List[PinholeCameraType] = [
+ PinholeCameraType.CAM_F0,
+ PinholeCameraType.CAM_B0,
+ PinholeCameraType.CAM_L0,
+ PinholeCameraType.CAM_L1,
+ PinholeCameraType.CAM_L2,
+ PinholeCameraType.CAM_R0,
+ PinholeCameraType.CAM_R1,
+ PinholeCameraType.CAM_R2,
]
all_lidar_types: List[LiDARType] = [
@@ -200,8 +200,8 @@ def _(_) -> None:
if sleep_time > 0:
time.sleep(max(sleep_time, 0.0))
- camera_frustum_handles: Dict[CameraType, viser.CameraFrustumHandle] = {}
- camera_gui_handles: Dict[CameraType, viser.GuiImageHandle] = {}
+ camera_frustum_handles: Dict[PinholeCameraType, viser.CameraFrustumHandle] = {}
+ camera_gui_handles: Dict[PinholeCameraType, viser.GuiImageHandle] = {}
lidar_pc_handle: Optional[viser.PointCloudHandle] = None
add_box_detections_to_viser_server(
diff --git a/d123/datasets/av2/av2_constants.py b/d123/datasets/av2/av2_constants.py
index 5d163245..16dab0b3 100644
--- a/d123/datasets/av2/av2_constants.py
+++ b/d123/datasets/av2/av2_constants.py
@@ -1,7 +1,7 @@
from d123.common.utils.enums import SerialIntEnum
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.maps.map_datatypes import RoadLineType
-from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
class AV2SensorBoxDetectionType(SerialIntEnum):
@@ -76,15 +76,15 @@ class AV2SensorBoxDetectionType(SerialIntEnum):
AV2_CAMERA_TYPE_MAPPING = {
- "ring_front_center": CameraType.CAM_F0,
- "ring_front_left": CameraType.CAM_L0,
- "ring_front_right": CameraType.CAM_R0,
- "ring_side_left": CameraType.CAM_L1,
- "ring_side_right": CameraType.CAM_R1,
- "ring_rear_left": CameraType.CAM_L2,
- "ring_rear_right": CameraType.CAM_R2,
- "stereo_front_left": CameraType.CAM_STEREO_L,
- "stereo_front_right": CameraType.CAM_STEREO_R,
+ "ring_front_center": PinholeCameraType.CAM_F0,
+ "ring_front_left": PinholeCameraType.CAM_L0,
+ "ring_front_right": PinholeCameraType.CAM_R0,
+ "ring_side_left": PinholeCameraType.CAM_L1,
+ "ring_side_right": PinholeCameraType.CAM_R1,
+ "ring_rear_left": PinholeCameraType.CAM_L2,
+ "ring_rear_right": PinholeCameraType.CAM_R2,
+ "stereo_front_left": PinholeCameraType.CAM_STEREO_L,
+ "stereo_front_right": PinholeCameraType.CAM_STEREO_R,
}
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index e13bbf04..638eb205 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -25,8 +25,14 @@
from d123.datasets.av2.av2_map_conversion import convert_av2_map
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.camera.pinhole_camera import (
+ PinholeCameraMetadata,
+ PinholeCameraType,
+ PinholeDistortion,
+ PinholeIntrinsics,
+ camera_metadata_dict_to_json,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import (
@@ -232,24 +238,32 @@ def convert_av2_log_to_arrow(
return []
-def get_av2_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]:
+def get_av2_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
intrinsics_file = log_path / "calibration" / "intrinsics.feather"
intrinsics_df = pd.read_feather(intrinsics_file)
- camera_metadata: Dict[CameraType, CameraMetadata] = {}
+ camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = {}
for _, row in intrinsics_df.iterrows():
row = row.to_dict()
-
camera_type = AV2_CAMERA_TYPE_MAPPING[row["sensor_name"]]
- camera_metadata[camera_type] = CameraMetadata(
+ camera_metadata[camera_type] = PinholeCameraMetadata(
camera_type=camera_type,
width=row["width_px"],
height=row["height_px"],
- intrinsic=np.array(
- [[row["fx_px"], 0, row["cx_px"]], [0, row["fy_px"], row["cy_px"]], [0, 0, 1]], dtype=np.float64
+ intrinsics=PinholeIntrinsics(
+ fx=row["fx_px"],
+ fy=row["fy_px"],
+ cx=row["cx_px"],
+ cy=row["cy_px"],
+ ),
+ distortion=PinholeDistortion(
+ k1=row["k1"],
+ k2=row["k2"],
+ p1=0.0,
+ p2=0.0,
+ k3=row["k3"],
),
- distortion=np.array([row["k1"], row["k2"], row["k3"], 0, 0], dtype=np.float64),
)
return camera_metadata
@@ -456,9 +470,9 @@ def _extract_camera(
synchronization_df: pd.DataFrame,
source_log_path: Path,
data_converter_config: DataConverterConfig,
-) -> Dict[CameraType, Union[str, bytes]]:
+) -> Dict[PinholeCameraType, Union[str, bytes]]:
- camera_dict: Dict[CameraType, Union[str, bytes]] = {
+ camera_dict: Dict[PinholeCameraType, Union[str, bytes]] = {
camera_type: None for camera_type in AV2_CAMERA_TYPE_MAPPING.values()
}
split = source_log_path.parent.name
@@ -491,30 +505,27 @@ def _extract_camera(
else:
absolute_image_path = source_dataset_dir / relative_image_path
assert absolute_image_path.exists()
- # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic.
+ # TODO: Adjust for finer IMU timestamps to correct the camera extrinsic.
camera_extrinsic = StateSE3(
- x=row["tx_m"], y=row["ty_m"], z=row["tz_m"], qw=row["qw"], qx=row["qx"], qy=row["qy"], qz=row["qz"]
+ x=row["tx_m"],
+ y=row["ty_m"],
+ z=row["tz_m"],
+ qw=row["qw"],
+ qx=row["qx"],
+ qy=row["qy"],
+ qz=row["qz"],
)
- # camera_extrinsic = camera_extrinsic @ ego_transform
- camera_extrinsic = camera_extrinsic.transformation_matrix.flatten().tolist()
-
if data_converter_config.camera_store_option == "path":
- camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic)
+ camera_dict[camera_type] = (str(relative_image_path), camera_extrinsic.tolist())
elif data_converter_config.camera_store_option == "binary":
with open(absolute_image_path, "rb") as f:
- camera_dict[camera_type] = (f.read(), camera_extrinsic)
+ camera_dict[camera_type] = (f.read(), camera_extrinsic.tolist())
return camera_dict
def _extract_lidar(lidar_pc, data_converter_config: DataConverterConfig) -> Dict[LiDARType, Optional[str]]:
-
- # lidar: Optional[str] = None
- # lidar_full_path = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs" / lidar_pc.filename
- # if lidar_full_path.exists():
- # lidar = lidar_pc.filename
-
- # return {LiDARType.LIDAR_MERGED: lidar}
+ # TODO: Implement this function to extract lidar data.
return {}
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
index ec3a4824..1930bb69 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -15,13 +15,17 @@
from d123.common.utils.arrow_helper import open_arrow_table, write_arrow_table
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
from d123.datasets.utils.maps.opendrive.opendrive_map_conversion import convert_from_xodr
+from d123.datasets.utils.sensor.lidar_index_registry import CarlaLidarIndex
from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer
from d123.datatypes.maps.abstract_map_objects import AbstractLane
from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.datatypes.sensors.lidar_index import CarlaLidarIndex
+from d123.datatypes.sensors.camera.pinhole_camera import (
+    PinholeCameraMetadata,
+    PinholeCameraType,
+    PinholeDistortion,
+    PinholeIntrinsics,
+    camera_metadata_dict_to_json,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import get_carla_lincoln_mkz_2020_parameters
from d123.geometry import BoundingBoxSE3Index, Point2D, Point3D, Vector3DIndex
@@ -45,7 +49,7 @@
TRAFFIC_LIGHT_ASSIGNMENT_DISTANCE: Final[float] = 1.0 # [m]
SORT_BY_TIMESTAMP: Final[bool] = True
-CARLA_CAMERA_TYPES = {CameraType.CAM_F0}
+CARLA_CAMERA_TYPES = {PinholeCameraType.CAM_F0}
CARLA_DATA_ROOT: Final[Path] = Path(os.environ["CARLA_DATA_ROOT"])
@@ -247,20 +251,20 @@ def _get_metadata(location: str, log_name: str) -> LogMetadata:
)
-def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[CameraType, CameraMetadata]:
+def get_carla_camera_metadata(first_log_dict: Dict[str, Any]) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
# FIXME: This is a placeholder function to return camera metadata.
intrinsic = np.array(
- first_log_dict[f"{CameraType.CAM_F0.serialize()}_intrinsics"],
+ first_log_dict[f"{PinholeCameraType.CAM_F0.serialize()}_intrinsics"],
dtype=np.float64,
)
camera_metadata = {
- CameraType.CAM_F0: CameraMetadata(
- camera_type=CameraType.CAM_F0,
+ PinholeCameraType.CAM_F0: PinholeCameraMetadata(
+ camera_type=PinholeCameraType.CAM_F0,
width=1024,
height=512,
-            intrinsic=intrinsic,
-            distortion=np.zeros((5,), dtype=np.float64),
+            intrinsics=PinholeIntrinsics.from_camera_matrix(intrinsic),
+            distortion=PinholeDistortion.from_array(np.zeros((5,), dtype=np.float64), copy=False),
)
}
@@ -417,7 +421,7 @@ def _extract_route_lane_group_ids(route: List[List[float]], map_api: AbstractMap
def _extract_cameras(
data: Dict[str, Any], log_name: str, sample_name: str, data_converter_config: DataConverterConfig
-) -> Dict[CameraType, Optional[str]]:
+) -> Dict[PinholeCameraType, Optional[str]]:
camera_dict: Dict[str, Union[str, bytes]] = {}
for camera_type in CARLA_CAMERA_TYPES:
camera_full_path = CARLA_DATA_ROOT / "sensor_blobs" / log_name / camera_type.name / f"{sample_name}.jpg"
diff --git a/d123/datasets/nuplan/load_sensor.py b/d123/datasets/nuplan/load_sensor.py
index c00e4f31..6e80df47 100644
--- a/d123/datasets/nuplan/load_sensor.py
+++ b/d123/datasets/nuplan/load_sensor.py
@@ -2,7 +2,7 @@
from pathlib import Path
from d123.common.utils.dependencies import check_dependencies
-from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata
check_dependencies(["nuplan"], "nuplan")
from nuplan.database.utils.pointclouds.lidar import LidarPointCloud
diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
index d03dbaf9..79f824e3 100644
--- a/d123/datasets/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -10,7 +10,6 @@
import numpy as np
import pyarrow as pa
import yaml
-from pyquaternion import Quaternion
import d123.datasets.nuplan.utils as nuplan_utils
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
@@ -18,12 +17,18 @@
from d123.common.utils.dependencies import check_dependencies
from d123.datasets.nuplan.nuplan_map_conversion import MAP_LOCATIONS, NuPlanMapConverter
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex
from d123.datatypes.detections.detection import TrafficLightStatus
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.datatypes.sensors.lidar_index import NuplanLidarIndex
+from d123.datatypes.sensors.camera.pinhole_camera import (
+ PinholeCameraMetadata,
+ PinholeCameraType,
+ PinholeDistortion,
+ PinholeIntrinsics,
+ camera_metadata_dict_to_json,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import (
@@ -31,6 +36,7 @@
rear_axle_se3_to_center_se3,
)
from d123.geometry import BoundingBoxSE3, BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.geometry_index import StateSE3Index
from d123.geometry.rotation import EulerAngles
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
@@ -63,14 +69,14 @@
}
NUPLAN_CAMERA_TYPES = {
- CameraType.CAM_F0: CameraChannel.CAM_F0,
- CameraType.CAM_B0: CameraChannel.CAM_B0,
- CameraType.CAM_L0: CameraChannel.CAM_L0,
- CameraType.CAM_L1: CameraChannel.CAM_L1,
- CameraType.CAM_L2: CameraChannel.CAM_L2,
- CameraType.CAM_R0: CameraChannel.CAM_R0,
- CameraType.CAM_R1: CameraChannel.CAM_R1,
- CameraType.CAM_R2: CameraChannel.CAM_R2,
+ PinholeCameraType.CAM_F0: CameraChannel.CAM_F0,
+ PinholeCameraType.CAM_B0: CameraChannel.CAM_B0,
+ PinholeCameraType.CAM_L0: CameraChannel.CAM_L0,
+ PinholeCameraType.CAM_L1: CameraChannel.CAM_L1,
+ PinholeCameraType.CAM_L2: CameraChannel.CAM_L2,
+ PinholeCameraType.CAM_R0: CameraChannel.CAM_R0,
+ PinholeCameraType.CAM_R1: CameraChannel.CAM_R1,
+ PinholeCameraType.CAM_R2: CameraChannel.CAM_R2,
}
NUPLAN_DATA_ROOT = Path(os.environ["NUPLAN_DATA_ROOT"])
@@ -106,7 +112,7 @@ def __init__(
def _collect_log_paths(self) -> Dict[str, List[Path]]:
# NOTE: the nuplan mini folder has an internal train, val, test structure, all stored in "mini".
# The complete dataset is saved in the "trainval" folder (train and val), or in the "test" folder (for test).
- subsplit_log_names: Dict[str, List[str]] = create_splits_logs()
+ # subsplit_log_names: Dict[str, List[str]] = create_splits_logs()
log_paths_per_split: Dict[str, List[Path]] = {}
for split in self._splits:
@@ -123,7 +129,7 @@ def _collect_log_paths(self) -> Dict[str, List[Path]]:
all_log_files_in_path = [log_file for log_file in log_path.glob("*.db")]
all_log_names = set([str(log_file.stem) for log_file in all_log_files_in_path])
- set(subsplit_log_names[subsplit])
+ # set(subsplit_log_names[subsplit])
# log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names & split_log_names)]
log_paths = [log_path / f"{log_name}.db" for log_name in list(all_log_names)]
log_paths_per_split[split] = log_paths
@@ -138,7 +144,7 @@ def get_available_splits(self) -> List[str]:
"nuplan_mini_train",
"nuplan_mini_val",
"nuplan_mini_test",
- "nuplan_private_test",
+ "nuplan_private_test", # TODO: remove, not publicly available
]
def convert_maps(self, worker: WorkerPool) -> None:
@@ -204,7 +210,7 @@ def convert_nuplan_log_to_arrow(
)
vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
camera_metadata = get_nuplan_camera_metadata(log_path)
- lidar_metadata = get_nuplan_lidar_metadata(log_db)
+ lidar_metadata = get_nuplan_lidar_metadata()
schema_column_list = [
("token", pa.string()),
@@ -231,7 +237,7 @@ def convert_nuplan_log_to_arrow(
if data_converter_config.camera_store_option == "path":
schema_column_list.append((camera_type.serialize(), pa.string()))
schema_column_list.append(
- (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))
+ (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))
)
elif data_converter_config.camera_store_option == "binary":
@@ -256,30 +262,33 @@ def convert_nuplan_log_to_arrow(
return []
-def get_nuplan_camera_metadata(log_path: Path) -> Dict[CameraType, CameraMetadata]:
+def get_nuplan_camera_metadata(log_path: Path) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
- def _get_camera_metadata(camera_type: CameraType) -> CameraMetadata:
+ def _get_camera_metadata(camera_type: PinholeCameraType) -> PinholeCameraMetadata:
cam = list(get_cameras(log_path, [str(NUPLAN_CAMERA_TYPES[camera_type].value)]))[0]
- intrinsic = np.array(pickle.loads(cam.intrinsic))
- rotation = np.array(pickle.loads(cam.rotation))
- rotation = Quaternion(rotation).rotation_matrix
- distortion = np.array(pickle.loads(cam.distortion))
- return CameraMetadata(
+
+ intrinsics_camera_matrix = np.array(pickle.loads(cam.intrinsic)) # array of shape (3, 3)
+ intrinsic = PinholeIntrinsics.from_camera_matrix(intrinsics_camera_matrix)
+
+ distortion_array = np.array(pickle.loads(cam.distortion)) # array of shape (5,)
+ distortion = PinholeDistortion.from_array(distortion_array, copy=False)
+
+ return PinholeCameraMetadata(
camera_type=camera_type,
width=cam.width,
height=cam.height,
- intrinsic=intrinsic,
+ intrinsics=intrinsic,
distortion=distortion,
)
- log_cam_infos: Dict[str, CameraMetadata] = {}
+ log_cam_infos: Dict[str, PinholeCameraMetadata] = {}
for camera_type in NUPLAN_CAMERA_TYPES.keys():
log_cam_infos[camera_type] = _get_camera_metadata(camera_type)
return log_cam_infos
-def get_nuplan_lidar_metadata(log_db: NuPlanDB) -> Dict[LiDARType, LiDARMetadata]:
+def get_nuplan_lidar_metadata() -> Dict[LiDARType, LiDARMetadata]:
metadata: Dict[LiDARType, LiDARMetadata] = {}
metadata[LiDARType.LIDAR_MERGED] = LiDARMetadata(
lidar_type=LiDARType.LIDAR_MERGED,
@@ -297,7 +306,6 @@ def _write_recording_table(
data_converter_config: DataConverterConfig,
) -> None:
- # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer:
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
step_interval: float = int(TARGET_DT / NUPLAN_DT)
@@ -448,7 +456,7 @@ def _extract_camera(
lidar_pc: LidarPc,
source_log_path: Path,
data_converter_config: DataConverterConfig,
-) -> Dict[CameraType, Union[str, bytes]]:
+) -> Dict[PinholeCameraType, Union[str, bytes]]:
camera_dict: Dict[str, Union[str, bytes]] = {}
sensor_root = NUPLAN_DATA_ROOT / "nuplan-v1.1" / "sensor_blobs"
@@ -477,11 +485,13 @@ def _extract_camera(
c2img_e = cam_info.trans_matrix
c2e = img_e2e @ c2img_e
+ extrinsic = StateSE3.from_transformation_matrix(c2e)
+
if data_converter_config.camera_store_option == "path":
- camera_data = str(filename_jpg), c2e.flatten().tolist()
+ camera_data = str(filename_jpg), extrinsic.tolist()
elif data_converter_config.camera_store_option == "binary":
with open(filename_jpg, "rb") as f:
- camera_data = f.read(), c2e
+ camera_data = f.read(), extrinsic.tolist()
camera_dict[camera_type] = camera_data
diff --git a/d123/datasets/utils/sensor/camera_conventions.py b/d123/datasets/utils/sensor/camera_conventions.py
new file mode 100644
index 00000000..184eb9ef
--- /dev/null
+++ b/d123/datasets/utils/sensor/camera_conventions.py
@@ -0,0 +1,85 @@
+"""
+Default Camera Coordinate System in 123D:
+
+ -Y (up) /| H
+ | / | e
+ | / | i
+ | / | g
+ | / | h
+ | | | t
+ O────────────|──●──|──────────── +Z (forward), aka. optical/principal axis
+ / | / h
+ / | / t
+ / | / d
+ / | / i
++X (right) |/ W
+
+We use COLMAP/OpenCV convention (+Z forward, -Y up, +X right),
+ abbreviated as "pZmYpX" for the forward-up-right axes.
+
+Other common conventions, given as forward-up-right axes, include:
+ - (+X forward, +Z up, -Y right), "pXpZmY", e.g. Waymo Open Dataset
+
+NOTE: This file should be extended if other conventions are needed in the future.
+"""
+
+from enum import Enum
+from typing import Union
+
+import numpy as np
+
+from d123.geometry import StateSE3
+
+
+class CameraConvention(Enum):
+ """Camera coordinate system conventions
+ p/m: positive/negative
+ X/Y/Z: axes directions
+ order: forward, up, right
+
+ Example: pZmYpX means +Z forward, -Y up, +X right
+ """
+
+ pZmYpX = "pZmYpX" # Default in 123D (OpenCV/COLMAP)
+ pXpZmY = "pXpZmY" # e.g. Waymo Open Dataset
+
+
+def convert_camera_convention(
+ from_pose: StateSE3,
+ from_convention: Union[CameraConvention, str],
+ to_convention: Union[CameraConvention, str],
+) -> StateSE3:
+ """Convert camera pose between different conventions.
+ 123D default is pZmYpX (+Z forward, -Y up, +X right).
+
+ :param from_pose: StateSE3 representing the camera pose to convert
+ :param from_convention: CameraConvention representing the current convention of the pose
+ :param to_convention: CameraConvention representing the target convention to convert to
+ :return: StateSE3 representing the converted camera pose
+ """
+ # TODO: Write tests for this function
+ # TODO: Create function over batch/array of poses
+
+ if isinstance(from_convention, str):
+ from_convention = CameraConvention(from_convention)
+ if isinstance(to_convention, str):
+ to_convention = CameraConvention(to_convention)
+
+ if from_convention == to_convention:
+ return from_pose
+
+ flip_matrices = {
+ (CameraConvention.pXpZmY, CameraConvention.pZmYpX): np.array(
+ [
+ [0.0, -1.0, 0.0], # new X = old -Y (right)
+ [0.0, 0.0, -1.0], # new Y = old -Z (down)
+ [1.0, 0.0, 0.0], # new Z = old X (forward)
+ ],
+ dtype=np.float64,
+ ).T,
+ }
+
+ pose_transformation = from_pose.transformation_matrix.copy()
+ F = flip_matrices[(from_convention, to_convention)]
+ pose_transformation[:3, :3] = pose_transformation[:3, :3] @ F
+ return StateSE3.from_transformation_matrix(pose_transformation)
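A numeric sanity check of the Waymo-to-d123 flip registered in `flip_matrices` (pure NumPy; `StateSE3` is replaced by a bare rotation matrix, and `F` copies the matrix above):

    import numpy as np

    # Waymo: +X forward, +Z up, -Y right ("pXpZmY"); d123/OpenCV: +Z forward, -Y up, +X right.
    F = np.array(
        [
            [0.0, -1.0, 0.0],
            [0.0, 0.0, -1.0],
            [1.0, 0.0, 0.0],
        ],
        dtype=np.float64,
    ).T

    R_waymo = np.eye(3)                           # identity camera rotation in the Waymo frame
    R_d123 = R_waymo @ F                          # re-expressed in the d123 convention
    forward = R_d123 @ np.array([0.0, 0.0, 1.0])  # d123 optical axis (+Z) in world coordinates
    assert np.allclose(forward, [1.0, 0.0, 0.0])  # lands on Waymo forward (+X), as intended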
diff --git a/d123/datatypes/sensors/lidar_index.py b/d123/datasets/utils/sensor/lidar_index_registry.py
similarity index 100%
rename from d123/datatypes/sensors/lidar_index.py
rename to d123/datasets/utils/sensor/lidar_index_registry.py
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 95649218..27bc4c05 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -10,20 +10,29 @@
import numpy as np
import numpy.typing as npt
import pyarrow as pa
-from fromd123.datatypes.detections.detection_types import DetectionType
+from typing import Optional
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.dependencies import check_dependencies
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datasets.utils.sensor.camera_conventions import CameraConvention, convert_camera_convention
+from d123.datasets.utils.sensor.lidar_index_registry import WopdLidarIndex
from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
+from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import CameraMetadata, CameraType, camera_metadata_dict_to_json
-from d123.datatypes.sensors.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
-from d123.datatypes.sensors.lidar_index import WopdLidarIndex
+from d123.datatypes.sensors.camera.pinhole_camera import (
+ PinholeCameraMetadata,
+ PinholeCameraType,
+ PinholeDistortion,
+ PinholeIntrinsics,
+ camera_metadata_dict_to_json,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.geometry_index import StateSE3Index
from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
@@ -52,12 +61,12 @@
}
# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L50
-WOPD_CAMERA_TYPES: Dict[int, CameraType] = {
- 1: CameraType.CAM_F0, # front_camera
- 2: CameraType.CAM_L0, # front_left_camera
- 3: CameraType.CAM_R0, # front_right_camera
- 4: CameraType.CAM_L1, # left_camera
- 5: CameraType.CAM_R1, # right_camera
+WOPD_CAMERA_TYPES: Dict[int, PinholeCameraType] = {
+ 1: PinholeCameraType.CAM_F0, # front_camera
+ 2: PinholeCameraType.CAM_L0, # front_left_camera
+ 3: PinholeCameraType.CAM_R0, # front_right_camera
+ 4: PinholeCameraType.CAM_L1, # left_camera
+ 5: PinholeCameraType.CAM_R1, # right_camera
}
# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L66
@@ -239,7 +248,7 @@ def convert_wopd_tfrecord_log_to_arrow(
elif data_converter_config.camera_store_option == "binary":
schema_column_list.append((camera_type.serialize(), pa.binary()))
schema_column_list.append(
- (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4))
+ (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))
)
if data_converter_config.lidar_store_option is not None:
@@ -273,26 +282,24 @@ def convert_wopd_tfrecord_log_to_arrow(
def get_wopd_camera_metadata(
initial_frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig
-) -> Dict[CameraType, CameraMetadata]:
+) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
- cam_metadatas: Dict[CameraType, CameraMetadata] = {}
+ cam_metadatas: Dict[PinholeCameraType, PinholeCameraMetadata] = {}
if data_converter_config.camera_store_option is not None:
for calibration in initial_frame.context.camera_calibrations:
camera_type = WOPD_CAMERA_TYPES[calibration.name]
-
# https://github.com/waymo-research/waymo-open-dataset/blob/master/src/waymo_open_dataset/dataset.proto#L96
# https://github.com/waymo-research/waymo-open-dataset/issues/834#issuecomment-2134995440
fx, fy, cx, cy, k1, k2, p1, p2, k3 = calibration.intrinsic
- _intrinsics = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
- _distortions = np.array([k1, k2, p1, p2, k3])
-
+ intrinsics = PinholeIntrinsics(fx=fx, fy=fy, cx=cx, cy=cy)
+ distortion = PinholeDistortion(k1=k1, k2=k2, p1=p1, p2=p2, k3=k3)
if camera_type in WOPD_CAMERA_TYPES.values():
- cam_metadatas[camera_type] = CameraMetadata(
+ cam_metadatas[camera_type] = PinholeCameraMetadata(
camera_type=camera_type,
width=calibration.width,
height=calibration.height,
- intrinsic=_intrinsics,
- distortion=_distortions,
+ intrinsics=intrinsics,
+ distortion=distortion,
)
return cam_metadatas
@@ -305,12 +312,14 @@ def get_wopd_lidar_metadata(
laser_metadatas: Dict[LiDARType, LiDARMetadata] = {}
if data_converter_config.lidar_store_option is not None:
for laser_calibration in initial_frame.context.laser_calibrations:
+
lidar_type = WOPD_LIDAR_TYPES[laser_calibration.name]
- extrinsic = (
- np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4)
- if laser_calibration.extrinsic
- else None
- )
+
+ extrinsic: Optional[StateSE3] = None
+ if laser_calibration.extrinsic:
+ extrinsic_transform = np.array(laser_calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4)
+ extrinsic = StateSE3.from_transformation_matrix(extrinsic_transform)
+
laser_metadatas[lidar_type] = LiDARMetadata(
lidar_type=lidar_type,
lidar_index=WopdLidarIndex,
@@ -328,7 +337,6 @@ def _write_recording_table(
data_converter_config: DataConverterConfig,
) -> None:
- # with pa.ipc.new_stream(str(log_file_path), recording_schema) as writer:
with pa.OSFile(str(log_file_path), "wb") as sink:
with pa.ipc.new_file(sink, recording_schema) as writer:
@@ -464,40 +472,32 @@ def _extract_traffic_lights() -> Tuple[List[int], List[int]]:
def _extract_camera(
frame: dataset_pb2.Frame, data_converter_config: DataConverterConfig
-) -> Dict[CameraType, Union[str, bytes]]:
+) -> Dict[PinholeCameraType, Union[str, bytes]]:
camera_dict: Dict[str, Union[str, bytes]] = {} # TODO: Fix wrong type hint
np.array(frame.pose.transform).reshape(4, 4)
# NOTE: The extrinsic matrix in frame.context.camera_calibration is fixed to model the ego to camera transformation.
# The poses in frame.images[idx] are the motion compensated ego poses when the camera triggers.
- #
- context_extrinsic: Dict[str, npt.NDArray] = {}
+ context_extrinsic: Dict[str, StateSE3] = {}
for calibration in frame.context.camera_calibrations:
camera_type = WOPD_CAMERA_TYPES[calibration.name]
+ camera_transform = np.array(calibration.extrinsic.transform, dtype=np.float64).reshape(4, 4)
+ camera_pose = StateSE3.from_transformation_matrix(camera_transform)
+ # NOTE: WOPD uses a different camera convention than d123
+ # https://arxiv.org/pdf/1912.04838 (Figure 1.)
+ camera_pose = convert_camera_convention(
+ camera_pose,
+ from_convention=CameraConvention.pXpZmY,
+ to_convention=CameraConvention.pZmYpX,
+ )
+ context_extrinsic[camera_type] = camera_pose
- transform = np.array(calibration.extrinsic.transform).reshape(4, 4)
-
- # FIXME: This is an ugly hack to convert to uniform camera convention.
- # TODO: Extract function to convert between different camera conventions.
- flip_camera = EulerAngles(roll=np.deg2rad(0.0), pitch=np.deg2rad(90.0), yaw=np.deg2rad(-90.0)).rotation_matrix
- transform[:3, :3] = transform[:3, :3] @ flip_camera
- context_extrinsic[camera_type] = transform
-
- # TODO: Refactor to avoid code duplication
for image_proto in frame.images:
camera_type = WOPD_CAMERA_TYPES[image_proto.name]
-
- np.array(image_proto.pose.transform).reshape(4, 4)
camera_bytes = image_proto.image
-
- # # Compute the transform from ego_global_transform to ego_at_camera_transform
- # # ego_global_transform * T = ego_at_camera_transform => T = ego_global_transform^-1 * ego_at_camera_transform
- # np.linalg.inv(ego_global_transform) @ ego_at_trigger_transform
-
- # TODO: figure out the correct transform
- camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].flatten().tolist()
+ camera_dict[camera_type] = camera_bytes, context_extrinsic[camera_type].tolist()
return camera_dict
diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py
index af66541e..7f6ca5e3 100644
--- a/d123/datatypes/scene/abstract_scene.py
+++ b/d123/datatypes/scene/abstract_scene.py
@@ -6,8 +6,8 @@
from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
from d123.datatypes.maps.abstract_map import AbstractMap
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import Camera, CameraType
-from d123.datatypes.sensors.lidar import LiDAR, LiDARType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
@@ -33,7 +33,7 @@ def log_metadata(self) -> LogMetadata:
@property
@abc.abstractmethod
- def available_camera_types(self) -> List[CameraType]:
+ def available_camera_types(self) -> List[PinholeCameraType]:
raise NotImplementedError
@property
@@ -79,7 +79,7 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]:
raise NotImplementedError
@abc.abstractmethod
- def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]:
+ def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]:
raise NotImplementedError
@abc.abstractmethod
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index 8f61397a..3fcc2150 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -18,8 +18,13 @@
get_traffic_light_detections_from_arrow_table,
)
from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo
-from d123.datatypes.sensors.camera import Camera, CameraMetadata, CameraType, camera_metadata_dict_from_json
-from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
+from d123.datatypes.sensors.camera.pinhole_camera import (
+ PinholeCamera,
+ PinholeCameraMetadata,
+ PinholeCameraType,
+ camera_metadata_dict_from_json,
+)
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
@@ -29,7 +34,7 @@
def _get_scene_data(
arrow_file_path: Union[Path, str],
-) -> Tuple[LogMetadata, VehicleParameters, Dict[CameraType, CameraMetadata]]:
+) -> Tuple[LogMetadata, VehicleParameters, Dict[PinholeCameraType, PinholeCameraMetadata]]:
"""
Extracts the metadata and vehicle parameters from the arrow file.
"""
@@ -60,7 +65,6 @@ def __init__(
) -> None:
self._recording_table: pa.Table = None
-
(
_metadata,
_vehicle_parameters,
@@ -69,7 +73,7 @@ def __init__(
) = _get_scene_data(arrow_file_path)
self._metadata: LogMetadata = _metadata
self._vehicle_parameters: VehicleParameters = _vehicle_parameters
- self._camera_metadata: Dict[CameraType, CameraMetadata] = _camera_metadata
+ self._camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = _camera_metadata
self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
self._map_api: Optional[AbstractMap] = None
@@ -105,7 +109,7 @@ def log_metadata(self) -> LogMetadata:
return self._metadata
@property
- def available_camera_types(self) -> List[CameraType]:
+ def available_camera_types(self) -> List[PinholeCameraType]:
return list(self._camera_metadata.keys())
@property
@@ -163,9 +167,9 @@ def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]:
route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py()
return route_lane_group_ids
- def get_camera_at_iteration(self, iteration: int, camera_type: CameraType) -> Optional[Camera]:
+ def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]:
self._lazy_initialize()
- camera: Optional[Camera] = None
+ camera: Optional[PinholeCamera] = None
if camera_type in self._camera_metadata:
table_index = self._get_table_index(iteration)
camera = get_camera_from_arrow_table(
diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/conversion.py
index 2b95f2fb..8d36e80b 100644
--- a/d123/datatypes/scene/arrow/utils/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/conversion.py
@@ -20,17 +20,18 @@
)
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera import Camera, CameraMetadata
-from d123.datatypes.sensors.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
from d123.geometry import BoundingBoxSE3, Vector3D
+from d123.geometry.se import StateSE3
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
"carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
- "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
+ # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
}
@@ -93,13 +94,13 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index:
def get_camera_from_arrow_table(
arrow_table: pa.Table,
index: int,
- camera_metadata: CameraMetadata,
+ camera_metadata: PinholeCameraMetadata,
log_metadata: LogMetadata,
-) -> Camera:
+) -> PinholeCamera:
table_data = arrow_table[camera_metadata.camera_type.serialize()][index].as_py()
- extrinsic = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py()
- extrinsic = np.array(extrinsic).reshape((4, 4)) if extrinsic else None
+ extrinsic_list = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py()
+ extrinsic = StateSE3.from_list(extrinsic_list) if extrinsic_list is not None else None
if table_data is None or extrinsic is None:
return None
@@ -118,7 +119,7 @@ def get_camera_from_arrow_table(
else:
raise NotImplementedError("Only string file paths for camera data are supported.")
- return Camera(
+ return PinholeCamera(
metadata=camera_metadata,
image=image,
extrinsic=extrinsic,
diff --git a/d123/datatypes/scene/scene_filter.py b/d123/datatypes/scene/scene_filter.py
index 5dbb5ba6..c05073db 100644
--- a/d123/datatypes/scene/scene_filter.py
+++ b/d123/datatypes/scene/scene_filter.py
@@ -1,7 +1,7 @@
from dataclasses import dataclass
from typing import List, Optional
-from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
# TODO: Add more filter options (e.g. scene tags, ego movement, or whatever appropriate)
@@ -23,7 +23,7 @@ class SceneFilter:
duration_s: Optional[float] = 10.0
history_s: Optional[float] = 3.0
- camera_types: Optional[List[CameraType]] = None
+ camera_types: Optional[List[PinholeCameraType]] = None
max_num_scenes: Optional[int] = None
shuffle: bool = False
@@ -34,10 +34,10 @@ def __post_init__(self):
camera_types = []
for camera_type in self.camera_types:
if isinstance(camera_type, str):
- camera_type = CameraType.deserialize[camera_type]
+                camera_type = PinholeCameraType.deserialize(camera_type)
camera_types.append(camera_type)
elif isinstance(camera_type, int):
- camera_type = CameraType(camera_type)
+ camera_type = PinholeCameraType(camera_type)
camera_types.append(camera_type)
else:
raise ValueError(f"Invalid camera type: {camera_type}")
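For illustration, `SceneFilter.__post_init__` above accepts camera types as enum members, names, or integer values; a self-contained sketch with a stub enum (`PinholeCameraTypeDemo` is hypothetical, and d123's `deserialize` is assumed to behave like a name lookup):

    from enum import IntEnum
    from typing import List, Union

    class PinholeCameraTypeDemo(IntEnum):
        CAM_F0 = 0
        CAM_B0 = 1

    def normalize(camera_types: List[Union[str, int]]) -> List[PinholeCameraTypeDemo]:
        out = []
        for ct in camera_types:
            if isinstance(ct, str):
                out.append(PinholeCameraTypeDemo[ct])  # name lookup, like deserialize
            elif isinstance(ct, int):
                out.append(PinholeCameraTypeDemo(ct))  # value lookup (enum members are ints too)
            else:
                raise ValueError(f"Invalid camera type: {ct}")
        return out

    assert normalize(["CAM_F0", 1]) == [PinholeCameraTypeDemo.CAM_F0, PinholeCameraTypeDemo.CAM_B0]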
diff --git a/d123/datatypes/sensors/__init__.py b/d123/datatypes/sensors/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/sensors/camera.py b/d123/datatypes/sensors/camera.py
deleted file mode 100644
index 56fe6f07..00000000
--- a/d123/datatypes/sensors/camera.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from __future__ import annotations
-
-import json
-from dataclasses import dataclass
-from typing import Any, Dict
-
-import numpy as np
-import numpy.typing as npt
-
-from d123.common.utils.enums import SerialIntEnum
-
-
-class CameraType(SerialIntEnum):
- """
- Enum for cameras in d123.
- """
-
- CAM_F0 = 0
- CAM_B0 = 1
- CAM_L0 = 2
- CAM_L1 = 3
- CAM_L2 = 4
- CAM_R0 = 5
- CAM_R1 = 6
- CAM_R2 = 7
- CAM_STEREO_L = 8
- CAM_STEREO_R = 9
-
-
-@dataclass
-class CameraMetadata:
-
- camera_type: CameraType
- width: int
- height: int
- intrinsic: npt.NDArray[np.float64] # 3x3 matrix # TODO: don't store matrix but values.
- distortion: npt.NDArray[np.float64] # 5x1 vector # TODO: don't store matrix but values.
-
- def to_dict(self) -> Dict[str, Any]:
- # TODO: remove None types. Only a placeholder for now.
- return {
- "camera_type": int(self.camera_type),
- "width": self.width,
- "height": self.height,
- "intrinsic": self.intrinsic.tolist() if self.intrinsic is not None else None,
- "distortion": self.distortion.tolist() if self.distortion is not None else None,
- }
-
- @classmethod
- def from_dict(cls, json_dict: Dict[str, Any]) -> CameraMetadata:
- # TODO: remove None types. Only a placeholder for now.
- return cls(
- camera_type=CameraType(json_dict["camera_type"]),
- width=json_dict["width"],
- height=json_dict["height"],
- intrinsic=np.array(json_dict["intrinsic"]) if json_dict["intrinsic"] is not None else None,
- distortion=np.array(json_dict["distortion"]) if json_dict["distortion"] is not None else None,
- )
-
- @property
- def aspect_ratio(self) -> float:
- return self.width / self.height
-
- @property
- def fov_x(self) -> float:
- """
- Calculates the horizontal field of view (FOV) in radian.
- """
- fx = self.intrinsic[0, 0]
- fov_x_rad = 2 * np.arctan(self.width / (2 * fx))
- return fov_x_rad
-
- @property
- def fov_y(self) -> float:
- """
- Calculates the vertical field of view (FOV) in radian.
- """
- fy = self.intrinsic[1, 1]
- fov_y_rad = 2 * np.arctan(self.height / (2 * fy))
- return fov_y_rad
-
-
-def camera_metadata_dict_to_json(camera_metadata: Dict[CameraType, CameraMetadata]) -> Dict[str, Dict[str, Any]]:
- """
- Converts a dictionary of CameraMetadata to a JSON-serializable format.
- :param camera_metadata: Dictionary of CameraMetadata.
- :return: JSON-serializable dictionary.
- """
- camera_metadata_dict = {
- camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
- }
- return json.dumps(camera_metadata_dict)
-
-
-def camera_metadata_dict_from_json(json_dict: Dict[str, Dict[str, Any]]) -> Dict[CameraType, CameraMetadata]:
- """
- Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
- :param json_dict: JSON-serializable dictionary.
- :return: Dictionary of CameraMetadata.
- """
- camera_metadata_dict = json.loads(json_dict)
- return {
- CameraType.deserialize(camera_type): CameraMetadata.from_dict(metadata)
- for camera_type, metadata in camera_metadata_dict.items()
- }
-
-
-@dataclass
-class Camera:
-
- metadata: CameraMetadata
- image: npt.NDArray[np.uint8]
- extrinsic: npt.NDArray[np.float64] # 4x4 matrix
-
- def get_view_matrix(self) -> np.ndarray:
- # Compute the view matrix based on the camera's position and orientation
- pass
diff --git a/d123/datatypes/sensors/camera/__init__.py b/d123/datatypes/sensors/camera/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/sensors/camera/pinhole_camera.py b/d123/datatypes/sensors/camera/pinhole_camera.py
new file mode 100644
index 00000000..8f7f8d44
--- /dev/null
+++ b/d123/datatypes/sensors/camera/pinhole_camera.py
@@ -0,0 +1,260 @@
+from __future__ import annotations
+
+import json
+from dataclasses import dataclass
+from typing import Any, Dict, Optional
+
+import numpy as np
+import numpy.typing as npt
+from enum import IntEnum
+
+from d123.common.utils.enums import SerialIntEnum
+from d123.common.utils.mixin import ArrayMixin
+from d123.geometry.se import StateSE3
+
+
+class PinholeCameraType(SerialIntEnum):
+ """
+ Enum for cameras in d123.
+ """
+
+ CAM_F0 = 0
+ CAM_B0 = 1
+ CAM_L0 = 2
+ CAM_L1 = 3
+ CAM_L2 = 4
+ CAM_R0 = 5
+ CAM_R1 = 6
+ CAM_R2 = 7
+ CAM_STEREO_L = 8
+ CAM_STEREO_R = 9
+
+
+class PinholeIntrinsicsIndex(IntEnum):
+
+ FX = 0
+ FY = 1
+ CX = 2
+ CY = 3
+ SKEW = 4 # NOTE: not used, but added for completeness
+
+
+class PinholeIntrinsics(ArrayMixin):
+
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, fx: float, fy: float, cx: float, cy: float, skew: float = 0.0) -> None:
+ array = np.zeros(len(PinholeIntrinsicsIndex), dtype=np.float64)
+ array[PinholeIntrinsicsIndex.FX] = fx
+ array[PinholeIntrinsicsIndex.FY] = fy
+ array[PinholeIntrinsicsIndex.CX] = cx
+ array[PinholeIntrinsicsIndex.CY] = cy
+ array[PinholeIntrinsicsIndex.SKEW] = skew
+ object.__setattr__(self, "_array", array)
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> PinholeIntrinsics:
+ assert array.ndim == 1
+ assert array.shape[-1] == len(PinholeIntrinsicsIndex)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @classmethod
+ def from_camera_matrix(cls, intrinsic: npt.NDArray[np.float64]) -> PinholeIntrinsics:
+ """
+ Create a PinholeIntrinsics from a 3x3 intrinsic matrix.
+ :param intrinsic: A 3x3 numpy array representing the intrinsic matrix.
+ :return: A PinholeIntrinsics instance.
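+
+ Example (illustrative values):
+ K = np.array([[1000.0, 0.0, 960.0], [0.0, 1000.0, 540.0], [0.0, 0.0, 1.0]])
+ intrinsics = PinholeIntrinsics.from_camera_matrix(K) # fx == 1000.0, cx == 960.0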
+ """
+ assert intrinsic.shape == (3, 3)
+ fx = intrinsic[0, 0]
+ fy = intrinsic[1, 1]
+ cx = intrinsic[0, 2]
+ cy = intrinsic[1, 2]
+ skew = intrinsic[0, 1] # Not used in most cases.
+ array = np.array([fx, fy, cx, cy, skew], dtype=np.float64)
+ return cls.from_array(array, copy=False)
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ return self._array
+
+ @property
+ def fx(self) -> float:
+ return self._array[PinholeIntrinsicsIndex.FX]
+
+ @property
+ def fy(self) -> float:
+ return self._array[PinholeIntrinsicsIndex.FY]
+
+ @property
+ def cx(self) -> float:
+ return self._array[PinholeIntrinsicsIndex.CX]
+
+ @property
+ def cy(self) -> float:
+ return self._array[PinholeIntrinsicsIndex.CY]
+
+ @property
+ def skew(self) -> float:
+ return self._array[PinholeIntrinsicsIndex.SKEW]
+
+ @property
+ def camera_matrix(self) -> npt.NDArray[np.float64]:
+ """
+ Returns the intrinsic matrix.
+ :return: A 3x3 numpy array representing the intrinsic matrix.
+ """
+ K = np.array(
+ [
+ [self.fx, self.skew, self.cx],
+ [0.0, self.fy, self.cy],
+ [0.0, 0.0, 1.0],
+ ],
+ dtype=np.float64,
+ )
+ return K
+
+
+class PinholeDistortionIndex(IntEnum):
+ K1 = 0
+ K2 = 1
+ P1 = 2
+ P2 = 3
+ K3 = 4
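+ # NOTE: ordering matches the common OpenCV convention (k1, k2, p1, p2, k3).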
+
+
+class PinholeDistortion(ArrayMixin):
+ _array: npt.NDArray[np.float64]
+
+ def __init__(self, k1: float, k2: float, p1: float, p2: float, k3: float) -> None:
+ array = np.zeros(len(PinholeDistortionIndex), dtype=np.float64)
+ array[PinholeDistortionIndex.K1] = k1
+ array[PinholeDistortionIndex.K2] = k2
+ array[PinholeDistortionIndex.P1] = p1
+ array[PinholeDistortionIndex.P2] = p2
+ array[PinholeDistortionIndex.K3] = k3
+ object.__setattr__(self, "_array", array)
+
+ @classmethod
+ def from_array(cls, array: npt.NDArray[np.float64], copy: bool = True) -> PinholeDistortion:
+ assert array.ndim == 1
+ assert array.shape[-1] == len(PinholeDistortionIndex)
+ instance = object.__new__(cls)
+ object.__setattr__(instance, "_array", array.copy() if copy else array)
+ return instance
+
+ @property
+ def array(self) -> npt.NDArray[np.float64]:
+ return self._array
+
+ @property
+ def k1(self) -> float:
+ return self._array[PinholeDistortionIndex.K1]
+
+ @property
+ def k2(self) -> float:
+ return self._array[PinholeDistortionIndex.K2]
+
+ @property
+ def p1(self) -> float:
+ return self._array[PinholeDistortionIndex.P1]
+
+ @property
+ def p2(self) -> float:
+ return self._array[PinholeDistortionIndex.P2]
+
+ @property
+ def k3(self) -> float:
+ return self._array[PinholeDistortionIndex.K3]
+
+
+@dataclass
+class PinholeCameraMetadata:
+
+ camera_type: PinholeCameraType
+ intrinsics: Optional[PinholeIntrinsics]
+ distortion: Optional[PinholeDistortion]
+ width: int
+ height: int
+
+ @classmethod
+ def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata:
+ return cls(
+ camera_type=PinholeCameraType(json_dict["camera_type"]),
+ intrinsics=(
+ PinholeIntrinsics.from_list(json_dict["intrinsics"]) if json_dict["intrinsics"] is not None else None
+ ),
+ distortion=(
+ PinholeDistortion.from_list(json_dict["distortion"]) if json_dict["distortion"] is not None else None
+ ),
+ width=int(json_dict["width"]),
+ height=int(json_dict["height"]),
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "camera_type": int(self.camera_type),
+ "intrinsics": self.intrinsics.tolist() if self.intrinsics is not None else None,
+ "distortion": self.distortion.tolist() if self.distortion is not None else None,
+ "width": self.width,
+ "height": self.height,
+ }
+
+ @property
+ def aspect_ratio(self) -> float:
+ return self.width / self.height
+
+ @property
+ def fov_x(self) -> float:
+ """
+ Calculates the horizontal field of view (FOV) in radians.
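+ For example, width=1920 with fx=1000 gives 2 * arctan(960 / 1000) ≈ 1.53 rad (≈ 87.7°).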
+ """
+ fov_x_rad = 2 * np.arctan(self.width / (2 * self.intrinsics.fx))
+ return fov_x_rad
+
+ @property
+ def fov_y(self) -> float:
+ """
+ Calculates the vertical field of view (FOV) in radians.
+ """
+ fov_y_rad = 2 * np.arctan(self.height / (2 * self.intrinsics.fy))
+ return fov_y_rad
+
+
+def camera_metadata_dict_to_json(
+ camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata],
+) -> str:
+ """
+ Converts a dictionary of PinholeCameraMetadata to a JSON string.
+ :param camera_metadata: Dictionary of PinholeCameraMetadata keyed by camera type.
+ :return: JSON string.
+ """
+ camera_metadata_dict = {
+ camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
+ }
+ return json.dumps(camera_metadata_dict)
+
+
+def camera_metadata_dict_from_json(
+ json_dict: str,
+) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
+ """
+ Converts a JSON string back to a dictionary of PinholeCameraMetadata.
+ :param json_dict: JSON string, as produced by camera_metadata_dict_to_json.
+ :return: Dictionary of PinholeCameraMetadata keyed by camera type.
+ """
+ camera_metadata_dict = json.loads(json_dict)
+ return {
+ PinholeCameraType.deserialize(camera_type): PinholeCameraMetadata.from_dict(metadata)
+ for camera_type, metadata in camera_metadata_dict.items()
+ }
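+
+
+# Illustrative round trip (metadata is a hypothetical PinholeCameraMetadata instance):
+# blob = camera_metadata_dict_to_json({PinholeCameraType.CAM_F0: metadata})
+# restored = camera_metadata_dict_from_json(blob)
+# assert restored[PinholeCameraType.CAM_F0].width == metadata.width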
+
+
+@dataclass
+class PinholeCamera:
+
+ metadata: PinholeCameraMetadata
+ image: npt.NDArray[np.uint8]
+ extrinsic: StateSE3
diff --git a/d123/datatypes/sensors/lidar/__init__.py b/d123/datatypes/sensors/lidar/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/sensors/lidar.py b/d123/datatypes/sensors/lidar/lidar.py
similarity index 93%
rename from d123/datatypes/sensors/lidar.py
rename to d123/datatypes/sensors/lidar/lidar.py
index f15ccc89..00356ee4 100644
--- a/d123/datatypes/sensors/lidar.py
+++ b/d123/datatypes/sensors/lidar/lidar.py
@@ -8,7 +8,8 @@
import numpy.typing as npt
from d123.common.utils.enums import SerialIntEnum
-from d123.datatypes.sensors.lidar_index import LIDAR_INDEX_REGISTRY, LiDARIndex
+from d123.datasets.utils.sensor.lidar_index_registry import LIDAR_INDEX_REGISTRY, LiDARIndex
+from d123.geometry import StateSE3
class LiDARType(SerialIntEnum):
@@ -27,8 +28,7 @@ class LiDARMetadata:
lidar_type: LiDARType
lidar_index: Type[LiDARIndex]
- extrinsic: Optional[npt.NDArray[np.float64]] = None # 4x4 matrix
-
+ extrinsic: Optional[StateSE3] = None
# TODO: add identifier if point cloud is returned in lidar or ego frame.
def to_dict(self) -> dict:
@@ -44,7 +44,7 @@ def from_dict(cls, json_dict: dict) -> LiDARMetadata:
if json_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY:
raise ValueError(f"Unknown lidar index: {json_dict['lidar_index']}")
lidar_index_class = LIDAR_INDEX_REGISTRY[json_dict["lidar_index"]]
- extrinsic = np.array(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None
+ extrinsic = StateSE3.from_list(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None
return cls(lidar_type=lidar_type, lidar_index=lidar_index_class, extrinsic=extrinsic)
diff --git a/d123/script/config/common/default_common.yaml b/d123/script/config/common/default_common.yaml
index f3f88913..5012ff14 100644
--- a/d123/script/config/common/default_common.yaml
+++ b/d123/script/config/common/default_common.yaml
@@ -3,12 +3,11 @@ defaults:
- worker: ray_distributed
- scene_filter: all_scenes
- scene_builder: default_scene_builder
- - override hydra/hydra_logging: colorlog
- override hydra/job_logging: colorlog
+ - override hydra/hydra_logging: colorlog
- _self_
distributed_timeout_seconds: 7200 # Sets how long to wait while synchronizing across worker nodes in a distributed context.
-
selected_simulation_metrics: null
# Sets verbosity level, in particular determines if progress bars are shown or not.
diff --git a/d123/script/config/common/default_experiment.yaml b/d123/script/config/common/default_experiment.yaml
index 6ad3c3d5..9617f335 100644
--- a/d123/script/config/common/default_experiment.yaml
+++ b/d123/script/config/common/default_experiment.yaml
@@ -1,5 +1,7 @@
defaults:
- default_dataset_paths
+ - override hydra/job_logging: colorlog
+ - override hydra/hydra_logging: colorlog
- _self_
# Cache parameters
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index 0a4544da..b126ba58 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -1,24 +1,22 @@
hydra:
run:
- dir: ${output_dir}
- output_subdir: ${output_dir}/code/hydra # Store hydra's config breakdown here for debugging
- searchpath: # Only in these paths are discoverable
+ dir: .
+ output_subdir: null
+ searchpath:
- pkg://d123.script.config
- pkg://d123.script.config.common
-
job:
chdir: False
-
defaults:
- default_common
- - default_experiment
- default_dataset_paths
- - _self_
- datasets:
- # - nuplan_private_dataset
+ - nuplan_private_dataset
# - carla_dataset
# - wopd_dataset
- - av2_sensor_dataset
+ # - av2_sensor_dataset
+ - _self_
+force_map_conversion: False
force_log_conversion: True
-force_map_conversion: True
diff --git a/d123/script/config/datasets/__init__.py b/d123/script/config/datasets/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/script/config/datasets/nuplan_private_dataset.yaml b/d123/script/config/datasets/nuplan_private_dataset.yaml
index af6f16ba..7062f38f 100644
--- a/d123/script/config/datasets/nuplan_private_dataset.yaml
+++ b/d123/script/config/datasets/nuplan_private_dataset.yaml
@@ -12,5 +12,5 @@ nuplan_private_dataset:
output_path: ${d123_data_root}
force_log_conversion: ${force_log_conversion}
force_map_conversion: ${force_map_conversion}
- camera_store_option: "binary"
+ camera_store_option: "path"
lidar_store_option: "path"
diff --git a/d123/script/run_dataset_conversion.py b/d123/script/run_dataset_conversion.py
index 62ad1e30..cdbee27d 100644
--- a/d123/script/run_dataset_conversion.py
+++ b/d123/script/run_dataset_conversion.py
@@ -2,7 +2,6 @@
from typing import List
import hydra
-from nuplan.planning.script.builders.logging_builder import build_logger
from omegaconf import DictConfig
from d123.script.builders.data_converter_builder import RawDataConverter, build_data_converter
@@ -20,8 +19,6 @@ def main(cfg: DictConfig) -> None:
Main entrypoint for metric caching.
:param cfg: omegaconf dictionary
"""
- # Configure logger
- build_logger(cfg)
# Build worker
worker = build_worker(cfg)
diff --git a/pyproject.toml b/pyproject.toml
index 44061249..4e2da9e5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -86,7 +86,7 @@ nuplan = [
"retry",
]
waymo = [
- "tensorflow==2.11.0",
+ "tensorflow==2.16.1",
"waymo-open-dataset-tf-2-11-0",
"intervaltree",
]
diff --git a/scripts/dataset/run_log_caching.sh b/scripts/dataset/run_log_caching.sh
index 34ff923c..923aaeca 100644
--- a/scripts/dataset/run_log_caching.sh
+++ b/scripts/dataset/run_log_caching.sh
@@ -2,6 +2,4 @@
python $D123_DEVKIT_ROOT/d123/script/run_dataset_conversion.py \
-experiment_name="caching" \
worker.threads_per_node=32
-# worker=single_machine_thread_pool
diff --git a/test_viser.py b/test_viser.py
index 9daa2d9c..b9fdeec9 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -4,14 +4,14 @@
from d123.common.visualization.viser.viser_viewer import ViserViewer
from d123.datatypes.scene.arrow.arrow_scene_builder import ArrowSceneBuilder
from d123.datatypes.scene.scene_filter import SceneFilter
-from d123.datatypes.sensors.camera import CameraType
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType
if __name__ == "__main__":
- # splits = ["nuplan_private_test"]
+ splits = ["nuplan_private_test"]
# splits = ["carla"]
# splits = ["wopd_train"]
- splits = ["av2-sensor-mini_train"]
+ # splits = ["av2-sensor-mini_train"]
log_names = None
scene_tokens = None
@@ -20,15 +20,14 @@
split_names=splits,
log_names=log_names,
scene_tokens=scene_tokens,
- duration_s=10,
+ duration_s=18,
history_s=0.5,
timestamp_threshold_s=10,
- shuffle=False,
- camera_types=[CameraType.CAM_F0],
+ shuffle=True,
+ camera_types=[PinholeCameraType.CAM_F0],
)
scene_builder = ArrowSceneBuilder(os.environ["D123_DATA_ROOT"])
worker = Sequential()
- # worker = RayDistributed()
scenes = scene_builder.get_scenes(scene_filter, worker)
print(f"Found {len(scenes)} scenes")
visualization_server = ViserViewer(scenes, scene_index=0)
From e176d8f475c09b303ed833286fe91b5faa501954 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 6 Oct 2025 22:00:13 +0200
Subject: [PATCH 056/145] Larger update on the Scene API and object. Smarter
management of memory maps, refactoring, and cleaner metadata structure.
---
d123/common/utils/arrow_helper.py | 27 ++-
d123/common/utils/mixin.py | 2 +
.../visualization/matplotlib/observation.py | 6 +-
d123/common/visualization/matplotlib/plots.py | 6 +-
d123/common/visualization/matplotlib/utils.py | 2 -
d123/common/visualization/utils/.gitkeep | 0
.../viser/elements/map_elements.py | 2 +-
.../visualization/viser/viser_config.py | 4 +-
.../visualization/viser/viser_viewer.py | 2 +-
d123/datasets/av2/av2_data_converter.py | 40 ++--
d123/datasets/carla/carla_data_converter.py | 4 +-
d123/datasets/nuplan/nuplan_data_converter.py | 37 ++-
.../{load_sensor.py => nuplan_load_sensor.py} | 0
d123/datasets/utils/arrow_schema.py | 0
d123/datasets/wopd/wopd_data_converter.py | 57 +++--
d123/datatypes/maps/gpkg/gpkg_map.py | 8 +-
d123/datatypes/scene/abstract_scene.py | 83 ++++---
d123/datatypes/scene/arrow/__init__.py | 0
d123/datatypes/scene/arrow/arrow_scene.py | 224 ++++++------------
.../scene/arrow/arrow_scene_builder.py | 43 ++--
d123/datatypes/scene/arrow/utils/__init__.py | 0
.../utils/{conversion.py => arrow_getters.py} | 31 +--
.../scene/arrow/utils/arrow_metadata_utils.py | 19 ++
d123/datatypes/scene/scene_metadata.py | 44 +++-
.../sensors/camera/pinhole_camera.py | 79 ++----
d123/datatypes/sensors/lidar/lidar.py | 40 +---
.../vehicle_state/vehicle_parameters.py | 11 +-
d123/geometry/utils/rotation_utils.py | 46 ++++
.../default_dataset_conversion.yaml | 2 +-
d123/simulation/agents/idm_agents.py | 4 +-
d123/simulation/agents/path_following.py | 2 +-
d123/simulation/agents/smart_agents.py | 2 +-
d123/simulation/gym/demo_gym_env.py | 11 +-
.../reward_builder/default_reward_builder.py | 2 +-
d123/simulation/gym/gym_env.py | 9 +-
.../metrics/sim_agents/interaction_based.py | 2 +-
.../metrics/sim_agents/sim_agents.py | 6 +-
d123/simulation/metrics/sim_agents/utils.py | 6 +-
.../observation/agents_observation.py | 4 +-
d123/simulation/simulation_2d.py | 2 +-
.../time_controller/log_time_controller.py | 2 +-
.../feature_builder/smart_feature_builder.py | 4 +-
notebooks/gym/test_simulation_2d.ipynb | 2 +-
notebooks/scene_rendering.ipynb | 2 +-
notebooks/smarty/smart_rollout.ipynb | 2 +-
notebooks/viz/bev_matplotlib.ipynb | 12 +-
notebooks/viz/bev_matplotlib_prediction.ipynb | 6 +-
notebooks/viz/camera_matplotlib.ipynb | 4 +-
notebooks/viz/video_example.ipynb | 10 +-
test_viser.py | 11 +-
50 files changed, 464 insertions(+), 460 deletions(-)
create mode 100644 d123/common/visualization/utils/.gitkeep
rename d123/datasets/nuplan/{load_sensor.py => nuplan_load_sensor.py} (100%)
create mode 100644 d123/datasets/utils/arrow_schema.py
create mode 100644 d123/datatypes/scene/arrow/__init__.py
create mode 100644 d123/datatypes/scene/arrow/utils/__init__.py
rename d123/datatypes/scene/arrow/utils/{conversion.py => arrow_getters.py} (85%)
create mode 100644 d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py
diff --git a/d123/common/utils/arrow_helper.py b/d123/common/utils/arrow_helper.py
index 6f68c152..54d231c7 100644
--- a/d123/common/utils/arrow_helper.py
+++ b/d123/common/utils/arrow_helper.py
@@ -1,8 +1,12 @@
+from functools import lru_cache
from pathlib import Path
-from typing import Union
+from typing import Final, Union
import pyarrow as pa
+# TODO: Tune Parameters and add to config?
+MAX_LRU_CACHED_TABLES: Final[int] = 4096
+
def open_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table:
with pa.memory_map(str(arrow_file_path), "rb") as source:
@@ -14,3 +18,24 @@ def write_arrow_table(table: pa.Table, arrow_file_path: Union[str, Path]) -> Non
with pa.OSFile(str(arrow_file_path), "wb") as sink:
with pa.ipc.new_file(sink, table.schema) as writer:
writer.write_table(table)
+
+
+@lru_cache(maxsize=MAX_LRU_CACHED_TABLES)
+def get_lru_cached_arrow_table(arrow_file_path: Union[str, Path]) -> pa.Table:
+ """Get a memory-mapped arrow table from the LRU cache or load it from disk.
+
+ :param arrow_file_path: The path to the arrow file.
+ :return: The cached memory-mapped arrow table.
+ """
+
+ # NOTE @DanielDauner: The number of memory maps a process can hold is limited by the
+ # Linux kernel parameter /proc/sys/vm/max_map_count (default: 65530 on most distributions).
+ # We therefore cache memory-mapped arrow tables with an LRU cache to avoid hitting
+ # this limit, especially since many scenes/routines access the same table.
+ # During cache eviction, the functools implementation calls __del__ on the evicted
+ # entry, which closes the memory map if no other references to the table exist.
+ # Hence, avoid holding long-lived references to the table; otherwise the memory map
+ # cannot be closed and the limit can still be hit.
+ # Not fully satisfied with this solution. Please reach out if you have a better idea.
+
+ return open_arrow_table(str(arrow_file_path))
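+
+
+# Illustrative usage (hypothetical path): repeated calls with the same path return the
+# cached table instead of opening a new memory map.
+# table = get_lru_cached_arrow_table("/data/d123/logs/example_log.arrow")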
diff --git a/d123/common/utils/mixin.py b/d123/common/utils/mixin.py
index 1e17db64..138a0e57 100644
--- a/d123/common/utils/mixin.py
+++ b/d123/common/utils/mixin.py
@@ -3,6 +3,8 @@
import numpy as np
import numpy.typing as npt
+# import pyarrow as pa
+
class ArrayMixin:
"""Mixin class for object entities."""
diff --git a/d123/common/visualization/matplotlib/observation.py b/d123/common/visualization/matplotlib/observation.py
index ed46c6a9..7b809f41 100644
--- a/d123/common/visualization/matplotlib/observation.py
+++ b/d123/common/visualization/matplotlib/observation.py
@@ -26,10 +26,8 @@
from d123.datatypes.maps.map_datatypes import MapLayer
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.vehicle_state.ego_state import EgoStateSE2, EgoStateSE3
-from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D
-from d123.geometry.geometry_index import StateSE2Index
+from d123.geometry import BoundingBoxSE2, BoundingBoxSE3, Point2D, StateSE2Index, Vector2D
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
-from d123.geometry.vector import Vector2D
def add_default_map_on_ax(
@@ -100,7 +98,7 @@ def add_box_future_detections_to_ax(ax: plt.Axes, scene: AbstractScene, iteratio
if agent.metadata.detection_type == DetectionType.VEHICLE
}
frequency = 1
- for iteration in range(iteration + frequency, scene.get_number_of_iterations(), frequency):
+ for iteration in range(iteration + frequency, scene.number_of_iterations, frequency):
agents = scene.get_box_detections_at_iteration(iteration)
for agent in agents:
if agent.metadata.track_token in agents_poses:
diff --git a/d123/common/visualization/matplotlib/plots.py b/d123/common/visualization/matplotlib/plots.py
index 7a1010cb..b61b8ab2 100644
--- a/d123/common/visualization/matplotlib/plots.py
+++ b/d123/common/visualization/matplotlib/plots.py
@@ -20,7 +20,7 @@ def _plot_scene_on_ax(ax: plt.Axes, scene: AbstractScene, iteration: int = 0, ra
box_detections = scene.get_box_detections_at_iteration(iteration)
traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration)
route_lane_group_ids = scene.get_route_lane_group_ids(iteration)
- map_api = scene.map_api
+ map_api = scene.get_map_api()
point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d
add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=route_lane_group_ids)
@@ -63,8 +63,8 @@ def render_scene_animation(
scene.open()
if end_idx is None:
- end_idx = scene.get_number_of_iterations()
- end_idx = min(end_idx, scene.get_number_of_iterations())
+ end_idx = scene.number_of_iterations
+ end_idx = min(end_idx, scene.number_of_iterations)
fig, ax = plt.subplots(figsize=(10, 10))
diff --git a/d123/common/visualization/matplotlib/utils.py b/d123/common/visualization/matplotlib/utils.py
index 9e030b80..ef335bb0 100644
--- a/d123/common/visualization/matplotlib/utils.py
+++ b/d123/common/visualization/matplotlib/utils.py
@@ -1,4 +1,3 @@
-from functools import lru_cache
from typing import Union
import matplotlib.patches as patches
@@ -99,7 +98,6 @@ def add_shapely_linestring_to_ax(
return ax
-@lru_cache(maxsize=32)
def get_pose_triangle(size: float) -> geom.Polygon:
"""Create a triangle shape for the pose."""
half_size = size / 2
diff --git a/d123/common/visualization/utils/.gitkeep b/d123/common/visualization/utils/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/common/visualization/viser/elements/map_elements.py b/d123/common/visualization/viser/elements/map_elements.py
index 90c247cb..d25fe73f 100644
--- a/d123/common/visualization/viser/elements/map_elements.py
+++ b/d123/common/visualization/viser/elements/map_elements.py
@@ -43,7 +43,7 @@ def _get_map_trimesh_dict(
MapLayer.CARPARK,
MapLayer.GENERIC_DRIVABLE,
]
- map_objects_dict = scene.map_api.get_proximal_map_objects(
+ map_objects_dict = scene.get_map_api().get_proximal_map_objects(
scene_center.point_2d,
radius=viser_config.map_radius,
layers=map_layers,
diff --git a/d123/common/visualization/viser/viser_config.py b/d123/common/visualization/viser/viser_config.py
index 276d6762..f99f7823 100644
--- a/d123/common/visualization/viser/viser_config.py
+++ b/d123/common/visualization/viser/viser_config.py
@@ -45,7 +45,7 @@ class ViserConfig:
# Map
map_visible: bool = True
map_radius: float = 200.0 # [m]
- map_non_road_z_offset: float = 0.0 # small translation to place crosswalks, parking, etc. on top of the road
+ map_non_road_z_offset: float = 0.1 # small z-translation to place crosswalks, parking, etc. on top of the road
# Bounding boxes
bounding_box_visible: bool = True
@@ -65,7 +65,7 @@ class ViserConfig:
camera_gui_image_scale: float = 0.25 # Resize factor for the camera image shown in the GUI (<1.0 for speed)
# LiDAR
- lidar_visible: bool = True
+ lidar_visible: bool = False
lidar_types: List[LiDARType] = field(default_factory=lambda: all_lidar_types.copy())
lidar_point_size: float = 0.05
lidar_point_shape: Literal["square", "diamond", "circle", "rounded", "sparkle"] = "circle"
diff --git a/d123/common/visualization/viser/viser_viewer.py b/d123/common/visualization/viser/viser_viewer.py
index 2bc44c3d..8cf1ec80 100644
--- a/d123/common/visualization/viser/viser_viewer.py
+++ b/d123/common/visualization/viser/viser_viewer.py
@@ -113,7 +113,7 @@ def next(self) -> None:
self.set_scene(self._scenes[self._scene_index])
def set_scene(self, scene: AbstractScene) -> None:
- num_frames = scene.get_number_of_iterations()
+ num_frames = scene.number_of_iterations
initial_ego_state: EgoStateSE3 = scene.get_ego_state_at_iteration(0)
with self._viser_server.gui.add_folder("Playback"):
diff --git a/d123/datasets/av2/av2_data_converter.py b/d123/datasets/av2/av2_data_converter.py
index 638eb205..63a5279d 100644
--- a/d123/datasets/av2/av2_data_converter.py
+++ b/d123/datasets/av2/av2_data_converter.py
@@ -1,7 +1,5 @@
import gc
import hashlib
-import json
-from dataclasses import asdict
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
@@ -24,15 +22,15 @@
)
from d123.datasets.av2.av2_map_conversion import convert_av2_map
from d123.datasets.raw_data_converter import DataConverterConfig, RawDataConverter
+from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema
from d123.datatypes.scene.scene_metadata import LogMetadata
from d123.datatypes.sensors.camera.pinhole_camera import (
PinholeCameraMetadata,
PinholeCameraType,
PinholeDistortion,
PinholeIntrinsics,
- camera_metadata_dict_to_json,
)
-from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import (
@@ -40,6 +38,7 @@
rear_axle_se3_to_center_se3,
)
from d123.geometry import BoundingBoxSE3Index, StateSE3, Vector3D, Vector3DIndex
+from d123.geometry.geometry_index import StateSE3Index
from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
@@ -174,16 +173,18 @@ def convert_av2_log_to_arrow(
sensor_df = build_sensor_dataframe(log_path)
synchronization_df = build_synchronization_dataframe(sensor_df)
- metadata = LogMetadata(
+ log_metadata = LogMetadata(
dataset="av2-sensor",
+ split=split,
log_name=log_path.name,
- location=None,
- timestep_seconds=0.1, # TODO: verify this
+ location=None, # TODO: Add location information.
+ timestep_seconds=0.1,
+ vehicle_parameters=get_av2_ford_fusion_hybrid_parameters(),
+ camera_metadata=get_av2_camera_metadata(log_path),
+ lidar_metadata=get_av2_lidar_metadata(log_path),
map_has_z=True,
+ map_is_local=True,
)
- vehicle_parameters = get_av2_ford_fusion_hybrid_parameters() # TODO: Add av2 vehicle parameters
- camera_metadata = get_av2_camera_metadata(log_path)
- lidar_metadata = get_av2_lidar_metadata(log_path)
schema_column_list = [
("token", pa.string()),
@@ -199,31 +200,26 @@ def convert_av2_log_to_arrow(
("route_lane_group_ids", pa.list_(pa.int64())),
]
if data_converter_config.lidar_store_option is not None:
- for lidar_type in lidar_metadata.keys():
+ for lidar_type in log_metadata.lidar_metadata.keys():
if data_converter_config.lidar_store_option == "path":
schema_column_list.append((lidar_type.serialize(), pa.string()))
elif data_converter_config.lidar_store_option == "binary":
raise NotImplementedError("Binary lidar storage is not implemented.")
if data_converter_config.camera_store_option is not None:
- for camera_type in camera_metadata.keys():
+ for camera_type in log_metadata.camera_metadata.keys():
if data_converter_config.camera_store_option == "path":
schema_column_list.append((camera_type.serialize(), pa.string()))
elif data_converter_config.camera_store_option == "binary":
schema_column_list.append((camera_type.serialize(), pa.binary()))
- schema_column_list.append((f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), 4 * 4)))
+ schema_column_list.append(
+ (f"{camera_type.serialize()}_extrinsic", pa.list_(pa.float64(), len(StateSE3Index)))
+ )
recording_schema = pa.schema(schema_column_list)
- recording_schema = recording_schema.with_metadata(
- {
- "log_metadata": json.dumps(asdict(metadata)),
- "vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
- "camera_metadata": camera_metadata_dict_to_json(camera_metadata),
- "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata),
- }
- )
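+ # Pack log-level metadata (vehicle, camera, lidar) into the arrow schema metadata in one helper call.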
+ recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata)
_write_recording_table(
sensor_df,
@@ -233,7 +229,7 @@ def convert_av2_log_to_arrow(
log_path,
data_converter_config,
)
- del recording_schema, vehicle_parameters
+ del recording_schema
gc.collect()
return []
diff --git a/d123/datasets/carla/carla_data_converter.py b/d123/datasets/carla/carla_data_converter.py
index 1930bb69..c38fc83a 100644
--- a/d123/datasets/carla/carla_data_converter.py
+++ b/d123/datasets/carla/carla_data_converter.py
@@ -18,7 +18,7 @@
from d123.datasets.utils.sensor.lidar_index_registry import CarlaLidarIndex
from d123.datatypes.maps.abstract_map import AbstractMap, MapLayer
from d123.datatypes.maps.abstract_map_objects import AbstractLane
-from d123.datatypes.maps.gpkg.gpkg_map import get_map_api_from_names
+from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api
from d123.datatypes.scene.scene_metadata import LogMetadata
from d123.datatypes.sensors.camera.pinhole_camera import (
PinholeCameraMetadata,
@@ -178,7 +178,7 @@ def convert_log_internal(args: List[Dict[str, Union[List[str], List[Path]]]]) ->
bounding_box_paths = sorted([bb_path for bb_path in (log_path / "boxes").iterdir()])
first_log_dict = _load_json_gz(bounding_box_paths[0])
map_name = first_log_dict["location"]
- map_api = get_map_api_from_names("carla", map_name)
+ map_api = get_global_map_api("carla", map_name)
metadata = _get_metadata(map_name, str(log_path.stem))
vehicle_parameters = get_carla_lincoln_mkz_2020_parameters()
diff --git a/d123/datasets/nuplan/nuplan_data_converter.py b/d123/datasets/nuplan/nuplan_data_converter.py
index 79f824e3..95067b32 100644
--- a/d123/datasets/nuplan/nuplan_data_converter.py
+++ b/d123/datasets/nuplan/nuplan_data_converter.py
@@ -1,8 +1,6 @@
import gc
-import json
import os
import pickle
-from dataclasses import asdict
from functools import partial
from pathlib import Path
from typing import Any, Dict, Final, List, Optional, Tuple, Union
@@ -20,15 +18,15 @@
from d123.datasets.utils.sensor.lidar_index_registry import NuplanLidarIndex
from d123.datatypes.detections.detection import TrafficLightStatus
from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema
from d123.datatypes.scene.scene_metadata import LogMetadata
from d123.datatypes.sensors.camera.pinhole_camera import (
PinholeCameraMetadata,
PinholeCameraType,
PinholeDistortion,
PinholeIntrinsics,
- camera_metadata_dict_to_json,
)
-from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import (
@@ -197,20 +195,23 @@ def convert_nuplan_log_to_arrow(
log_file_path = data_converter_config.output_path / split / f"{log_path.stem}.arrow"
if data_converter_config.force_log_conversion or not log_file_path.exists():
+
log_file_path.unlink(missing_ok=True)
if not log_file_path.parent.exists():
log_file_path.parent.mkdir(parents=True, exist_ok=True)
- metadata = LogMetadata(
+ log_metadata = LogMetadata(
dataset="nuplan",
+ split=split,
log_name=log_db.log_name,
location=log_db.log.map_version,
timestep_seconds=TARGET_DT,
+ vehicle_parameters=get_nuplan_chrysler_pacifica_parameters(),
+ camera_metadata=get_nuplan_camera_metadata(log_path),
+ lidar_metadata=get_nuplan_lidar_metadata(),
map_has_z=False,
+ map_is_local=False,
)
- vehicle_parameters = get_nuplan_chrysler_pacifica_parameters()
- camera_metadata = get_nuplan_camera_metadata(log_path)
- lidar_metadata = get_nuplan_lidar_metadata()
schema_column_list = [
("token", pa.string()),
@@ -226,14 +227,14 @@ def convert_nuplan_log_to_arrow(
("route_lane_group_ids", pa.list_(pa.int64())),
]
if data_converter_config.lidar_store_option is not None:
- for lidar_type in lidar_metadata.keys():
+ for lidar_type in log_metadata.lidar_metadata.keys():
if data_converter_config.lidar_store_option == "path":
schema_column_list.append((lidar_type.serialize(), pa.string()))
elif data_converter_config.lidar_store_option == "binary":
raise NotImplementedError("Binary lidar storage is not implemented.")
if data_converter_config.camera_store_option is not None:
- for camera_type in camera_metadata.keys():
+ for camera_type in log_metadata.camera_metadata.keys():
if data_converter_config.camera_store_option == "path":
schema_column_list.append((camera_type.serialize(), pa.string()))
schema_column_list.append(
@@ -244,21 +245,15 @@ def convert_nuplan_log_to_arrow(
raise NotImplementedError("Binary camera storage is not implemented.")
recording_schema = pa.schema(schema_column_list)
- recording_schema = recording_schema.with_metadata(
- {
- "log_metadata": json.dumps(asdict(metadata)),
- "vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
- "camera_metadata": camera_metadata_dict_to_json(camera_metadata),
- "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata),
- }
- )
-
+ recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata)
_write_recording_table(log_db, recording_schema, log_file_path, log_path, data_converter_config)
+ # Detach tables and drop log_db references so memory can be reclaimed.
log_db.detach_tables()
log_db.remove_ref()
- del recording_schema, vehicle_parameters, log_db
- gc.collect()
+ del recording_schema, log_db
+ gc.collect()
+
return []
diff --git a/d123/datasets/nuplan/load_sensor.py b/d123/datasets/nuplan/nuplan_load_sensor.py
similarity index 100%
rename from d123/datasets/nuplan/load_sensor.py
rename to d123/datasets/nuplan/nuplan_load_sensor.py
diff --git a/d123/datasets/utils/arrow_schema.py b/d123/datasets/utils/arrow_schema.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 27bc4c05..4481a532 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -1,8 +1,6 @@
import gc
import hashlib
-import json
import os
-from dataclasses import asdict
from functools import partial
from pathlib import Path
from typing import Any, Dict, Final, List, Tuple, Union
@@ -20,21 +18,25 @@
from d123.datasets.wopd.waymo_map_utils.wopd_map_utils import convert_wopd_map
from d123.datasets.wopd.wopd_utils import parse_range_image_and_camera_projection
from d123.datatypes.detections.detection_types import DetectionType
+from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import add_log_metadata_to_arrow_schema
from d123.datatypes.scene.scene_metadata import LogMetadata
from d123.datatypes.sensors.camera.pinhole_camera import (
PinholeCameraMetadata,
PinholeCameraType,
PinholeDistortion,
PinholeIntrinsics,
- camera_metadata_dict_to_json,
)
-from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType, lidar_metadata_dict_to_json
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
from d123.datatypes.vehicle_state.ego_state import DynamicStateSE3, EgoStateSE3, EgoStateSE3Index
from d123.datatypes.vehicle_state.vehicle_parameters import get_wopd_chrysler_pacifica_parameters
from d123.geometry import BoundingBoxSE3Index, EulerAngles, StateSE3, Vector3D, Vector3DIndex
-from d123.geometry.geometry_index import StateSE3Index
+from d123.geometry.geometry_index import EulerAnglesIndex, StateSE3Index
from d123.geometry.transform.transform_se3 import convert_relative_to_absolute_se3_array
from d123.geometry.utils.constants import DEFAULT_PITCH, DEFAULT_ROLL
+from d123.geometry.utils.rotation_utils import (
+ get_euler_array_from_quaternion_array,
+ get_quaternion_array_from_euler_array,
+)
check_dependencies(modules=["tensorflow", "waymo_open_dataset"], optional_name="waymo")
import tensorflow as tf
@@ -216,16 +218,18 @@ def convert_wopd_tfrecord_log_to_arrow(
if not log_file_path.parent.exists():
log_file_path.parent.mkdir(parents=True, exist_ok=True)
- metadata = LogMetadata(
+ log_metadata = LogMetadata(
dataset="wopd",
+ split=split,
log_name=log_name,
- location=None, # TODO: implement map name
- timestep_seconds=TARGET_DT, # TODO: Check if correct. Maybe not hardcode
+ location=None, # TODO: Add location information.
+ timestep_seconds=TARGET_DT,
+ vehicle_parameters=get_wopd_chrysler_pacifica_parameters(),
+ camera_metadata=get_wopd_camera_metadata(initial_frame, data_converter_config),
+ lidar_metadata=get_wopd_lidar_metadata(initial_frame, data_converter_config),
map_has_z=True,
+ map_is_local=True,
)
- vehicle_parameters = get_wopd_chrysler_pacifica_parameters()
- camera_metadata = get_wopd_camera_metadata(initial_frame, data_converter_config)
- lidar_metadata = get_wopd_lidar_metadata(initial_frame, data_converter_config)
schema_column_list = [
("token", pa.string()),
@@ -242,7 +246,7 @@ def convert_wopd_tfrecord_log_to_arrow(
]
# TODO: Adjust how cameras are added
if data_converter_config.camera_store_option is not None:
- for camera_type in camera_metadata.keys():
+ for camera_type in log_metadata.camera_metadata.keys():
if data_converter_config.camera_store_option == "path":
raise NotImplementedError("Path camera storage is not implemented.")
elif data_converter_config.camera_store_option == "binary":
@@ -252,25 +256,18 @@ def convert_wopd_tfrecord_log_to_arrow(
)
if data_converter_config.lidar_store_option is not None:
- for lidar_type in lidar_metadata.keys():
+ for lidar_type in log_metadata.lidar_metadata.keys():
if data_converter_config.lidar_store_option == "path":
raise NotImplementedError("Filepath lidar storage is not implemented.")
elif data_converter_config.lidar_store_option == "binary":
schema_column_list.append((lidar_type.serialize(), (pa.list_(pa.float32()))))
recording_schema = pa.schema(schema_column_list)
- recording_schema = recording_schema.with_metadata(
- {
- "log_metadata": json.dumps(asdict(metadata)),
- "vehicle_parameters": json.dumps(asdict(vehicle_parameters)),
- "camera_metadata": camera_metadata_dict_to_json(camera_metadata),
- "lidar_metadata": lidar_metadata_dict_to_json(lidar_metadata),
- }
- )
+ recording_schema = add_log_metadata_to_arrow_schema(recording_schema, log_metadata)
_write_recording_table(dataset, recording_schema, log_file_path, tf_record_path, data_converter_config)
- del recording_schema, vehicle_parameters, dataset
+ del recording_schema, dataset
except Exception as e:
import traceback
@@ -408,15 +405,9 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
for detection_idx, detection in enumerate(frame.laser_labels):
if detection.type not in WOPD_DETECTION_NAME_DICT:
continue
-
- # 1. Quaternion rotations
- # NOTE: WOPD bounding boxes are (1) stored in ego frame and (2) only supply yaw rotation
- # The global pose can either consider ego roll and pitch or set them to zero.
- # (zero roll/pitch corresponds to setting it to the ego roll/pitch, before transformation to global frame)
- #
detection_quaternion = EulerAngles(
- roll=ego_rear_axle.roll if ZERO_ROLL_PITCH else DEFAULT_ROLL,
- pitch=ego_rear_axle.pitch if ZERO_ROLL_PITCH else DEFAULT_PITCH,
+ roll=DEFAULT_ROLL,
+ pitch=DEFAULT_PITCH,
yaw=detection.box.heading,
).quaternion
@@ -443,6 +434,12 @@ def _extract_detections(frame: dataset_pb2.Frame) -> Tuple[List[List[float]], Li
detections_state[:, BoundingBoxSE3Index.STATE_SE3] = convert_relative_to_absolute_se3_array(
origin=ego_rear_axle, se3_array=detections_state[:, BoundingBoxSE3Index.STATE_SE3]
)
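+ # NOTE: WOPD boxes only provide yaw, so after the transform to the global frame we
+ # optionally reset roll/pitch to defaults to keep the boxes yaw-only.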
+ if ZERO_ROLL_PITCH:
+ euler_array = get_euler_array_from_quaternion_array(detections_state[:, BoundingBoxSE3Index.QUATERNION])
+ euler_array[..., EulerAnglesIndex.ROLL] = DEFAULT_ROLL
+ euler_array[..., EulerAnglesIndex.PITCH] = DEFAULT_PITCH
+ detections_state[..., BoundingBoxSE3Index.QUATERNION] = get_quaternion_array_from_euler_array(euler_array)
+
return detections_state.tolist(), detections_velocity.tolist(), detections_token, detections_types
diff --git a/d123/datatypes/maps/gpkg/gpkg_map.py b/d123/datatypes/maps/gpkg/gpkg_map.py
index 429a611e..be15d482 100644
--- a/d123/datatypes/maps/gpkg/gpkg_map.py
+++ b/d123/datatypes/maps/gpkg/gpkg_map.py
@@ -5,7 +5,7 @@
from collections import defaultdict
from functools import lru_cache
from pathlib import Path
-from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
+from typing import Callable, Dict, Final, Iterable, List, Optional, Tuple, Union
import geopandas as gpd
import shapely
@@ -29,6 +29,7 @@
from d123.geometry import Point2D
USE_ARROW: bool = True
+MAX_LRU_CACHED_TABLES: Final[int] = 128 # TODO: add to some configs
class GPKGMap(AbstractMap):
@@ -370,8 +371,8 @@ def _get_road_line(self, id: str) -> Optional[GPKGRoadLine]:
# return list(map_object_ids)
-@lru_cache(maxsize=32)
-def get_map_api_from_names(dataset: str, location: str) -> GPKGMap:
+@lru_cache(maxsize=MAX_LRU_CACHED_TABLES)
+def get_global_map_api(dataset: str, location: str) -> GPKGMap:
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
gpkg_path = D123_MAPS_ROOT / f"{dataset}_{location}.gpkg"
assert gpkg_path.is_file(), f"{dataset}_{location}.gpkg not found in {str(D123_MAPS_ROOT)}."
@@ -381,7 +382,6 @@ def get_map_api_from_names(dataset: str, location: str) -> GPKGMap:
def get_local_map_api(split_name: str, log_name: str) -> GPKGMap:
- print(split_name, log_name)
D123_MAPS_ROOT = Path(os.environ.get("D123_MAPS_ROOT"))
gpkg_path = D123_MAPS_ROOT / split_name / f"{log_name}.gpkg"
assert gpkg_path.is_file(), f"{log_name}.gpkg not found in {str(D123_MAPS_ROOT)}."
diff --git a/d123/datatypes/scene/abstract_scene.py b/d123/datatypes/scene/abstract_scene.py
index 7f6ca5e3..c9fbc1af 100644
--- a/d123/datatypes/scene/abstract_scene.py
+++ b/d123/datatypes/scene/abstract_scene.py
@@ -5,53 +5,30 @@
from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
from d123.datatypes.maps.abstract_map import AbstractMap
-from d123.datatypes.scene.scene_metadata import LogMetadata
+from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata
from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
-
-# TODO: Remove or improve open/close dynamic of Scene object.
+from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
class AbstractScene(abc.ABC):
- @property
- @abc.abstractmethod
- def log_name(self) -> str:
- raise NotImplementedError
-
- @property
- @abc.abstractmethod
- def token(self) -> str:
- raise NotImplementedError
-
- @property
- @abc.abstractmethod
- def log_metadata(self) -> LogMetadata:
- raise NotImplementedError
-
- @property
- @abc.abstractmethod
- def available_camera_types(self) -> List[PinholeCameraType]:
- raise NotImplementedError
-
- @property
- @abc.abstractmethod
- def available_lidar_types(self) -> List[LiDARType]:
- raise NotImplementedError
+ ####################################################################################################################
+ # Abstract Methods, to be implemented by subclasses
+ ####################################################################################################################
- @property
@abc.abstractmethod
- def map_api(self) -> Optional[AbstractMap]:
+ def get_log_metadata(self) -> LogMetadata:
raise NotImplementedError
@abc.abstractmethod
- def get_number_of_iterations(self) -> int:
+ def get_scene_extraction_metadata(self) -> SceneExtractionMetadata:
raise NotImplementedError
@abc.abstractmethod
- def get_number_of_history_iterations() -> int:
+ def get_map_api(self) -> Optional[AbstractMap]:
raise NotImplementedError
@abc.abstractmethod
@@ -86,8 +63,44 @@ def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType
def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]:
raise NotImplementedError
- def open(self) -> None:
- pass
+ ####################################################################################################################
+ # Syntactic Sugar / Properties, for easier access to common attributes
+ ####################################################################################################################
+
+ # 1. Log Metadata properties
+ @property
+ def log_metadata(self) -> LogMetadata:
+ return self.get_log_metadata()
+
+ @property
+ def log_name(self) -> str:
+ return self.log_metadata.log_name
+
+ @property
+ def vehicle_parameters(self) -> VehicleParameters:
+ return self.log_metadata.vehicle_parameters
+
+ @property
+ def available_camera_types(self) -> List[PinholeCameraType]:
+ return list(self.log_metadata.camera_metadata.keys())
+
+ @property
+ def available_lidar_types(self) -> List[LiDARType]:
+ return list(self.log_metadata.lidar_metadata.keys())
+
+ # 2. Scene Extraction Metadata properties
+ @property
+ def scene_extraction_metadata(self) -> SceneExtractionMetadata:
+ return self.get_scene_extraction_metadata()
+
+ @property
+ def token(self) -> str:
+ return self.scene_extraction_metadata.initial_token
+
+ @property
+ def number_of_iterations(self) -> int:
+ return self.scene_extraction_metadata.number_of_iterations
- def close(self) -> None:
- pass
+ @property
+ def number_of_history_iterations(self) -> int:
+ return self.scene_extraction_metadata.number_of_history_iterations
diff --git a/d123/datatypes/scene/arrow/__init__.py b/d123/datatypes/scene/arrow/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/scene/arrow/arrow_scene.py b/d123/datatypes/scene/arrow/arrow_scene.py
index 3fcc2150..8aa595db 100644
--- a/d123/datatypes/scene/arrow/arrow_scene.py
+++ b/d123/datatypes/scene/arrow/arrow_scene.py
@@ -1,15 +1,14 @@
-import json
from pathlib import Path
-from typing import Dict, List, Optional, Tuple, Union
+from typing import List, Optional, Union
import pyarrow as pa
-from d123.common.utils.arrow_helper import open_arrow_table
+from d123.common.utils.arrow_helper import get_lru_cached_arrow_table
from d123.datatypes.detections.detection import BoxDetectionWrapper, DetectionRecording, TrafficLightDetectionWrapper
from d123.datatypes.maps.abstract_map import AbstractMap
-from d123.datatypes.maps.gpkg.gpkg_map import get_local_map_api, get_map_api_from_names
+from d123.datatypes.maps.gpkg.gpkg_map import get_global_map_api, get_local_map_api
from d123.datatypes.scene.abstract_scene import AbstractScene
-from d123.datatypes.scene.arrow.utils.conversion import (
+from d123.datatypes.scene.arrow.utils.arrow_getters import (
get_box_detections_from_arrow_table,
get_camera_from_arrow_table,
get_ego_vehicle_state_from_arrow_table,
@@ -17,207 +16,132 @@
get_timepoint_from_arrow_table,
get_traffic_light_detections_from_arrow_table,
)
-from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo
-from d123.datatypes.sensors.camera.pinhole_camera import (
- PinholeCamera,
- PinholeCameraMetadata,
- PinholeCameraType,
- camera_metadata_dict_from_json,
-)
-from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata, LiDARType, lidar_metadata_dict_from_json
+from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow
+from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
-from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-
-# TODO: Remove or improve open/close dynamic of Scene object.
-
-
-def _get_scene_data(
- arrow_file_path: Union[Path, str],
-) -> Tuple[LogMetadata, VehicleParameters, Dict[PinholeCameraType, PinholeCameraMetadata]]:
- """
- Extracts the metadata and vehicle parameters from the arrow file.
- """
- # TODO: consider a better way to read metadata, instead of loading the entire table.
- table = open_arrow_table(arrow_file_path)
- metadata = LogMetadata(**json.loads(table.schema.metadata[b"log_metadata"].decode()))
- vehicle_parameters = VehicleParameters(**json.loads(table.schema.metadata[b"vehicle_parameters"].decode()))
-
- if b"camera_metadata" in table.schema.metadata:
- camera_metadata = camera_metadata_dict_from_json(table.schema.metadata[b"camera_metadata"].decode())
- else:
- camera_metadata = {}
-
- if b"lidar_metadata" in table.schema.metadata:
- lidar_metadata = lidar_metadata_dict_from_json(table.schema.metadata[b"lidar_metadata"].decode())
- else:
- lidar_metadata = {}
-
- del table
- return metadata, vehicle_parameters, camera_metadata, lidar_metadata
class ArrowScene(AbstractScene):
+
def __init__(
self,
arrow_file_path: Union[Path, str],
- scene_extraction_info: Optional[SceneExtractionInfo] = None,
+ scene_extraction_metadata: Optional[SceneExtractionMetadata] = None,
) -> None:
- self._recording_table: pa.Table = None
- (
- _metadata,
- _vehicle_parameters,
- _camera_metadata,
- _lidar_metadata,
- ) = _get_scene_data(arrow_file_path)
- self._metadata: LogMetadata = _metadata
- self._vehicle_parameters: VehicleParameters = _vehicle_parameters
- self._camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata] = _camera_metadata
- self._lidar_metadata: Dict[LiDARType, LiDARMetadata] = _lidar_metadata
+ self._arrow_file_path: Path = Path(arrow_file_path)
+ self._log_metadata: LogMetadata = get_log_metadata_from_arrow(arrow_file_path)
+
+ if scene_extraction_metadata is None:
+ scene_extraction_metadata = SceneExtractionMetadata(
+ initial_idx=0,
+ duration_s=self._log_metadata.timestep_seconds * self._get_recording_table().num_rows,
+ history_s=0.0,
+ iteration_duration_s=self._log_metadata.timestep_seconds,
+ )
+ self._scene_extraction_metadata: SceneExtractionMetadata = scene_extraction_metadata
- self._map_api: Optional[AbstractMap] = None
+ # NOTE: Lazily load the log-specific map API and keep a reference on the instance.
+ # Global maps are LRU cached internally.
+ self._local_map_api: Optional[AbstractMap] = None
- self._arrow_log_path = arrow_file_path
- self._scene_extraction_info: SceneExtractionInfo = scene_extraction_info
+ ####################################################################################################################
+ # Helpers for ArrowScene
+ ####################################################################################################################
def __reduce__(self):
+ """Helper for pickling the object."""
return (
self.__class__,
(
- self._arrow_log_path,
- self._scene_extraction_info,
+ self._arrow_file_path,
+ self._scene_extraction_metadata,
),
)
- @property
- def map_api(self) -> AbstractMap:
- self._lazy_initialize()
- return self._map_api
-
- @property
- def log_name(self) -> str:
- return str(self._arrow_log_path.stem)
-
- @property
- def token(self) -> str:
- self._lazy_initialize()
- return self._recording_table["token"][self._get_table_index(0)].as_py()
-
- @property
- def log_metadata(self) -> LogMetadata:
- return self._metadata
-
- @property
- def available_camera_types(self) -> List[PinholeCameraType]:
- return list(self._camera_metadata.keys())
-
- @property
- def available_lidar_types(self) -> List[LiDARType]:
- return list(self._lidar_metadata.keys())
+ def _get_recording_table(self) -> pa.Table:
+ """Helper function to return an LRU cached reference to the arrow table."""
+ return get_lru_cached_arrow_table(self._arrow_file_path)
def _get_table_index(self, iteration: int) -> int:
- self._lazy_initialize()
- assert (
- -self.get_number_of_history_iterations() <= iteration < self.get_number_of_iterations()
- ), "Iteration out of bounds"
- table_index = self._scene_extraction_info.initial_idx + iteration
+ assert -self.number_of_history_iterations <= iteration < self.number_of_iterations, "Iteration out of bounds"
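+ # Negative iterations index into the history, e.g. with initial_idx=100,
+ # iteration=-5 maps to table row 95 and iteration=0 to row 100.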
+ table_index = self._scene_extraction_metadata.initial_idx + iteration
return table_index
- def get_number_of_iterations(self) -> int:
- self._lazy_initialize()
- return self._scene_extraction_info.number_of_iterations
+ ####################################################################################################################
+ # Implementation of AbstractScene
+ ####################################################################################################################
+
+ def get_log_metadata(self) -> LogMetadata:
+ return self._log_metadata
+
+ def get_scene_extraction_metadata(self) -> SceneExtractionMetadata:
+ return self._scene_extraction_metadata
- def get_number_of_history_iterations(self) -> int:
- self._lazy_initialize()
- return self._scene_extraction_info.number_of_history_iterations
+ def get_map_api(self) -> Optional[AbstractMap]:
+ map_api: Optional[AbstractMap] = None
+ if self.log_metadata.map_is_local:
+ if self._local_map_api is None:
+ map_api = get_local_map_api(self.log_metadata.split, self.log_name)
+ self._local_map_api = map_api
+ else:
+ map_api = self._local_map_api
+ else:
+ map_api = get_global_map_api(self.log_metadata.dataset, self.log_metadata.location)
+ return map_api
def get_timepoint_at_iteration(self, iteration: int) -> TimePoint:
- self._lazy_initialize()
- return get_timepoint_from_arrow_table(self._recording_table, self._get_table_index(iteration))
+ return get_timepoint_from_arrow_table(self._get_recording_table(), self._get_table_index(iteration))
def get_ego_state_at_iteration(self, iteration: int) -> Optional[EgoStateSE3]:
- self._lazy_initialize()
return get_ego_vehicle_state_from_arrow_table(
- self._recording_table, self._get_table_index(iteration), self._vehicle_parameters
+ self._get_recording_table(),
+ self._get_table_index(iteration),
+ self.log_metadata.vehicle_parameters,
)
def get_box_detections_at_iteration(self, iteration: int) -> Optional[BoxDetectionWrapper]:
- # TODO: Make box detections optional in ArrowScene
- self._lazy_initialize()
- return get_box_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration))
+ return get_box_detections_from_arrow_table(self._get_recording_table(), self._get_table_index(iteration))
def get_traffic_light_detections_at_iteration(self, iteration: int) -> Optional[TrafficLightDetectionWrapper]:
- # TODO: Make traffic lights optional in ArrowScene
- self._lazy_initialize()
- return get_traffic_light_detections_from_arrow_table(self._recording_table, self._get_table_index(iteration))
+ return get_traffic_light_detections_from_arrow_table(
+ self._get_recording_table(), self._get_table_index(iteration)
+ )
def get_detection_recording_at_iteration(self, iteration: int) -> Optional[DetectionRecording]:
- # TODO: Make detection recording optional in ArrowScene
return DetectionRecording(
box_detections=self.get_box_detections_at_iteration(iteration),
traffic_light_detections=self.get_traffic_light_detections_at_iteration(iteration),
)
def get_route_lane_group_ids(self, iteration: int) -> Optional[List[int]]:
- self._lazy_initialize()
route_lane_group_ids: Optional[List[int]] = None
- if "route_lane_group_ids" in self._recording_table.column_names:
- table_index = self._get_table_index(iteration)
- route_lane_group_ids = self._recording_table["route_lane_group_ids"][table_index].as_py()
+ table = self._get_recording_table()
+ if "route_lane_group_ids" in table.column_names:
+ route_lane_group_ids = table["route_lane_group_ids"][self._get_table_index(iteration)].as_py()
return route_lane_group_ids
def get_camera_at_iteration(self, iteration: int, camera_type: PinholeCameraType) -> Optional[PinholeCamera]:
- self._lazy_initialize()
camera: Optional[PinholeCamera] = None
- if camera_type in self._camera_metadata:
- table_index = self._get_table_index(iteration)
+ if camera_type in self.available_camera_types:
camera = get_camera_from_arrow_table(
- self._recording_table,
- table_index,
- self._camera_metadata[camera_type],
+ self._get_recording_table(),
+ self._get_table_index(iteration),
+ camera_type,
self.log_metadata,
)
return camera
def get_lidar_at_iteration(self, iteration: int, lidar_type: LiDARType) -> Optional[LiDAR]:
- self._lazy_initialize()
lidar: Optional[LiDAR] = None
- if lidar_type in self._lidar_metadata:
- table_index = self._get_table_index(iteration)
+ if lidar_type in self.available_lidar_types:
lidar = get_lidar_from_arrow_table(
- self._recording_table,
- table_index,
- self._lidar_metadata[lidar_type],
+ self._get_recording_table(),
+ self._get_table_index(iteration),
+ lidar_type,
self.log_metadata,
)
return lidar
-
- def _lazy_initialize(self) -> None:
- self.open()
-
- def open(self) -> None:
- if self._map_api is None:
- try:
- if self._metadata.dataset in ["wopd", "av2-sensor"]:
- # FIXME:
- split = str(self._arrow_log_path.parent.name)
- self._map_api = get_local_map_api(split, self._metadata.log_name)
- else:
- self._map_api = get_map_api_from_names(self._metadata.dataset, self._metadata.location)
- self._map_api.initialize()
- except Exception as e:
- print(f"Error initializing map API: {e}")
- if self._recording_table is None:
- self._recording_table = open_arrow_table(self._arrow_log_path)
- if self._scene_extraction_info is None:
- self._scene_extraction_info = SceneExtractionInfo(
- initial_idx=0,
- duration_s=self._metadata.timestep_seconds * len(self._recording_table),
- history_s=0.0,
- iteration_duration_s=self._metadata.timestep_seconds,
- )
-
- def close(self) -> None:
- del self._recording_table
- self._recording_table = None
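
Aside: the refactor above replaces the explicit open()/close() lifecycle with per-access lookups through get_lru_cached_arrow_table, which keeps ArrowScene trivially picklable (see __reduce__) and bounds memory across many scenes via cache eviction. A minimal sketch of what such a helper could look like; the cache size, the memory-mapped IPC read, and the module layout are assumptions rather than the repository's actual implementation:

    from functools import lru_cache
    from pathlib import Path

    import pyarrow as pa
    import pyarrow.ipc as ipc


    @lru_cache(maxsize=32)  # assumed bound; least-recently-used tables are evicted
    def get_lru_cached_arrow_table(arrow_file_path: Path) -> pa.Table:
        # Memory-mapping lets repeated loads share pages through the OS cache.
        with pa.memory_map(str(arrow_file_path), "r") as source:
            return ipc.open_file(source).read_all()

Since lru_cache keys on the argument, call sites should normalize str/Path inputs so the same file is not cached twice.
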
diff --git a/d123/datatypes/scene/arrow/arrow_scene_builder.py b/d123/datatypes/scene/arrow/arrow_scene_builder.py
index 94571715..bf48c96b 100644
--- a/d123/datatypes/scene/arrow/arrow_scene_builder.py
+++ b/d123/datatypes/scene/arrow/arrow_scene_builder.py
@@ -1,16 +1,16 @@
-import json
import random
from functools import partial
from pathlib import Path
from typing import Iterator, List, Optional, Set, Union
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
-from d123.common.utils.arrow_helper import open_arrow_table
+from d123.common.utils.arrow_helper import get_lru_cached_arrow_table
from d123.datatypes.scene.abstract_scene import AbstractScene
from d123.datatypes.scene.abstract_scene_builder import SceneBuilder
from d123.datatypes.scene.arrow.arrow_scene import ArrowScene
+from d123.datatypes.scene.arrow.utils.arrow_metadata_utils import get_log_metadata_from_arrow
from d123.datatypes.scene.scene_filter import SceneFilter
-from d123.datatypes.scene.scene_metadata import LogMetadata, SceneExtractionInfo
+from d123.datatypes.scene.scene_metadata import SceneExtractionMetadata
class ArrowSceneBuilder(SceneBuilder):
@@ -71,29 +71,29 @@ def _extract_scenes_from_logs(log_paths: List[Path], filter: SceneFilter) -> Lis
scenes: List[AbstractScene] = []
for log_path in log_paths:
try:
- scene_extraction_infos = _get_scene_extraction_info(log_path, filter)
+ scene_extraction_metadatas = _get_scene_extraction_metadatas(log_path, filter)
except Exception as e:
print(f"Error extracting scenes from {log_path}: {e}")
continue
- for scene_extraction_info in scene_extraction_infos:
+ for scene_extraction_metadata in scene_extraction_metadatas:
scenes.append(
ArrowScene(
arrow_file_path=log_path,
- scene_extraction_info=scene_extraction_info,
+ scene_extraction_metadata=scene_extraction_metadata,
)
)
return scenes
-def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter) -> List[SceneExtractionInfo]:
- scene_extraction_infos: List[SceneExtractionInfo] = []
+def _get_scene_extraction_metadatas(log_path: Union[str, Path], filter: SceneFilter) -> List[SceneExtractionMetadata]:
+ scene_extraction_metadatas: List[SceneExtractionMetadata] = []
- recording_table = open_arrow_table(log_path)
- log_metadata = LogMetadata(**json.loads(recording_table.schema.metadata[b"log_metadata"].decode()))
+ recording_table = get_lru_cached_arrow_table(log_path)
+ log_metadata = get_log_metadata_from_arrow(log_path)
# 1. Filter map name
if filter.map_names is not None and log_metadata.map_name not in filter.map_names:
- return scene_extraction_infos
+ return scene_extraction_metadatas
start_idx = int(filter.history_s / log_metadata.timestep_seconds) if filter.history_s is not None else 0
end_idx = (
@@ -103,7 +103,8 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter)
)
if filter.duration_s is None:
return [
- SceneExtractionInfo(
+ SceneExtractionMetadata(
+ initial_token=str(recording_table["token"][start_idx].as_py()),
initial_idx=start_idx,
duration_s=(end_idx - start_idx) * log_metadata.timestep_seconds,
history_s=filter.history_s if filter.history_s is not None else 0.0,
@@ -114,27 +115,29 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter)
scene_token_set = set(filter.scene_tokens) if filter.scene_tokens is not None else None
for idx in range(start_idx, end_idx):
- scene_extraction_info: Optional[SceneExtractionInfo] = None
+ scene_extraction_metadata: Optional[SceneExtractionMetadata] = None
if scene_token_set is None:
- scene_extraction_info = SceneExtractionInfo(
+ scene_extraction_metadata = SceneExtractionMetadata(
+ initial_token=str(recording_table["token"][idx].as_py()),
initial_idx=idx,
duration_s=filter.duration_s,
history_s=filter.history_s,
iteration_duration_s=log_metadata.timestep_seconds,
)
elif str(recording_table["token"][idx]) in scene_token_set:
- scene_extraction_info = SceneExtractionInfo(
+ scene_extraction_metadata = SceneExtractionMetadata(
+ initial_token=str(recording_table["token"][idx].as_py()),
initial_idx=idx,
duration_s=filter.duration_s,
history_s=filter.history_s,
iteration_duration_s=log_metadata.timestep_seconds,
)
- if scene_extraction_info is not None:
+ if scene_extraction_metadata is not None:
# Check if timestamp threshold to the previous scene is exceeded, if specified in filter
- if filter.timestamp_threshold_s is not None and len(scene_extraction_infos) > 0:
- iteration_delta = idx - scene_extraction_infos[-1].initial_idx
+ if filter.timestamp_threshold_s is not None and len(scene_extraction_metadatas) > 0:
+ iteration_delta = idx - scene_extraction_metadatas[-1].initial_idx
if (iteration_delta * log_metadata.timestep_seconds) < filter.timestamp_threshold_s:
continue
@@ -148,7 +151,7 @@ def _get_scene_extraction_info(log_path: Union[str, Path], filter: SceneFilter)
if not all(cameras_available):
continue
- scene_extraction_infos.append(scene_extraction_info)
+ scene_extraction_metadatas.append(scene_extraction_metadata)
del recording_table, log_metadata
- return scene_extraction_infos
+ return scene_extraction_metadatas
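
For intuition, one extracted window under the renamed SceneExtractionMetadata maps onto table rows as follows; the numbers below are hypothetical and only illustrate the initial_idx arithmetic that _get_table_index performs:

    # Hypothetical 20 Hz log (timestep_seconds = 0.05) with 400 rows.
    meta = SceneExtractionMetadata(
        initial_token="0123abcd",  # hypothetical token stored at row 100
        initial_idx=100,           # scene iteration 0 is table row 100
        duration_s=5.0,            # 100 future iterations -> rows 100..199
        history_s=1.0,             # 20 history iterations -> rows 80..99
        iteration_duration_s=0.05,
    )
    # _get_table_index(iteration) returns initial_idx + iteration, so valid
    # iterations run from -20 (row 80) through 99 (row 199).
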
diff --git a/d123/datatypes/scene/arrow/utils/__init__.py b/d123/datatypes/scene/arrow/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/d123/datatypes/scene/arrow/utils/conversion.py b/d123/datatypes/scene/arrow/utils/arrow_getters.py
similarity index 85%
rename from d123/datatypes/scene/arrow/utils/conversion.py
rename to d123/datatypes/scene/arrow/utils/arrow_getters.py
index 8d36e80b..ac86ca18 100644
--- a/d123/datatypes/scene/arrow/utils/conversion.py
+++ b/d123/datatypes/scene/arrow/utils/arrow_getters.py
@@ -20,18 +20,17 @@
)
from d123.datatypes.detections.detection_types import DetectionType
from d123.datatypes.scene.scene_metadata import LogMetadata
-from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraMetadata
-from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARMetadata
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCamera, PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDAR, LiDARType
from d123.datatypes.time.time_point import TimePoint
from d123.datatypes.vehicle_state.ego_state import EgoStateSE3
from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-from d123.geometry import BoundingBoxSE3, Vector3D
-from d123.geometry.se import StateSE3
+from d123.geometry import BoundingBoxSE3, StateSE3, Vector3D
DATASET_SENSOR_ROOT: Dict[str, Path] = {
"nuplan": Path(os.environ["NUPLAN_DATA_ROOT"]) / "nuplan-v1.1" / "sensor_blobs",
"carla": Path(os.environ["CARLA_DATA_ROOT"]) / "sensor_blobs",
- # "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
+ "av2-sensor": Path(os.environ["AV2_SENSOR_DATA_ROOT"]) / "sensor_mini",
}
@@ -94,13 +93,13 @@ def get_traffic_light_detections_from_arrow_table(arrow_table: pa.Table, index:
def get_camera_from_arrow_table(
arrow_table: pa.Table,
index: int,
- camera_metadata: PinholeCameraMetadata,
+ camera_type: PinholeCameraType,
log_metadata: LogMetadata,
) -> PinholeCamera:
- table_data = arrow_table[camera_metadata.camera_type.serialize()][index].as_py()
- extrinsic_list = arrow_table[f"{camera_metadata.camera_type.serialize()}_extrinsic"][index].as_py()
- extrinsic = StateSE3.from_list(extrinsic_list) if extrinsic_list is not None else None
+ table_data = arrow_table[camera_type.serialize()][index].as_py()
+ extrinsic_values = arrow_table[f"{camera_type.serialize()}_extrinsic"][index].as_py()
+ extrinsic = StateSE3.from_list(extrinsic_values) if extrinsic_values is not None else None
if table_data is None or extrinsic is None:
return None
@@ -120,7 +119,7 @@ def get_camera_from_arrow_table(
raise NotImplementedError("Only string file paths for camera data are supported.")
return PinholeCamera(
- metadata=camera_metadata,
+ metadata=log_metadata.camera_metadata[camera_type],
image=image,
extrinsic=extrinsic,
)
@@ -129,13 +128,15 @@ def get_camera_from_arrow_table(
def get_lidar_from_arrow_table(
arrow_table: pa.Table,
index: int,
- lidar_metadata: LiDARMetadata,
+ lidar_type: LiDARType,
log_metadata: LogMetadata,
) -> LiDAR:
assert (
- lidar_metadata.lidar_type.serialize() in arrow_table.schema.names
- ), f'"{lidar_metadata.lidar_type.serialize()}" field not found in Arrow table schema.'
- lidar_data = arrow_table[lidar_metadata.lidar_type.serialize()][index].as_py()
+ lidar_type.serialize() in arrow_table.schema.names
+ ), f'"{lidar_type.serialize()}" field not found in Arrow table schema.'
+ lidar_data = arrow_table[lidar_type.serialize()][index].as_py()
+ lidar_metadata = log_metadata.lidar_metadata[lidar_type]
+
if isinstance(lidar_data, str):
sensor_root = DATASET_SENSOR_ROOT[log_metadata.dataset]
full_lidar_path = sensor_root / lidar_data
@@ -143,7 +144,7 @@ def get_lidar_from_arrow_table(
# NOTE: We move data specific import into if-else block, to avoid data specific import errors
if log_metadata.dataset == "nuplan":
- from d123.datasets.nuplan.load_sensor import load_nuplan_lidar_from_path
+ from d123.datasets.nuplan.nuplan_load_sensor import load_nuplan_lidar_from_path
lidar = load_nuplan_lidar_from_path(full_lidar_path, lidar_metadata)
elif log_metadata.dataset == "carla":
diff --git a/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py
new file mode 100644
index 00000000..e392d337
--- /dev/null
+++ b/d123/datatypes/scene/arrow/utils/arrow_metadata_utils.py
@@ -0,0 +1,19 @@
+import json
+from pathlib import Path
+from typing import Union
+
+import pyarrow as pa
+
+from d123.common.utils.arrow_helper import get_lru_cached_arrow_table
+from d123.datatypes.scene.scene_metadata import LogMetadata
+
+
+def get_log_metadata_from_arrow(arrow_file_path: Union[Path, str]) -> LogMetadata:
+ table = get_lru_cached_arrow_table(arrow_file_path)
+ log_metadata = LogMetadata.from_dict(json.loads(table.schema.metadata[b"log_metadata"].decode()))
+ return log_metadata
+
+
+def add_log_metadata_to_arrow_schema(schema: pa.Schema, log_metadata: LogMetadata) -> pa.Schema:
+ schema = schema.with_metadata({"log_metadata": json.dumps(log_metadata.to_dict())})
+ return schema
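
The two helpers above make the schema metadata the single source of truth for log-level information. A short usage sketch with a made-up column layout; note that pyarrow's Schema.with_metadata replaces any existing schema metadata wholesale, so other metadata keys would need to be merged in before calling add_log_metadata_to_arrow_schema:

    import pyarrow as pa

    # Writing side: attach the log metadata before the table is serialized.
    schema = pa.schema([("token", pa.string()), ("timestamp_us", pa.int64())])
    schema = add_log_metadata_to_arrow_schema(schema, log_metadata)

    # Reading side: recover a typed LogMetadata without touching any rows.
    recovered = get_log_metadata_from_arrow("/path/to/log.arrow")
    assert recovered.timestep_seconds == log_metadata.timestep_seconds
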
diff --git a/d123/datatypes/scene/scene_metadata.py b/d123/datatypes/scene/scene_metadata.py
index 0fc12b03..ae40aa4d 100644
--- a/d123/datatypes/scene/scene_metadata.py
+++ b/d123/datatypes/scene/scene_metadata.py
@@ -1,27 +1,57 @@
-from dataclasses import dataclass
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import Dict
import d123
+from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraMetadata, PinholeCameraType
+from d123.datatypes.sensors.lidar.lidar import LiDARMetadata, LiDARType
+from d123.datatypes.vehicle_state.vehicle_parameters import VehicleParameters
-@dataclass
+@dataclass(frozen=True)
class LogMetadata:
- # TODO: add
- # - split
- # - global/local map
-
dataset: str
+ split: str
log_name: str
location: str
timestep_seconds: float
+ vehicle_parameters: VehicleParameters
+ camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata]
+ lidar_metadata: Dict[LiDARType, LiDARMetadata]
+
map_has_z: bool
+ map_is_local: bool
version: str = str(d123.__version__)
+ @classmethod
+ def from_dict(cls, data_dict: Dict) -> LogMetadata:
+
+ data_dict["vehicle_parameters"] = VehicleParameters(**data_dict["vehicle_parameters"])
+ data_dict["camera_metadata"] = {
+ PinholeCameraType.deserialize(key): PinholeCameraMetadata.from_dict(value)
+ for key, value in data_dict.get("camera_metadata", {}).items()
+ }
+ data_dict["lidar_metadata"] = {
+ LiDARType.deserialize(key): LiDARMetadata.from_dict(value)
+ for key, value in data_dict.get("lidar_metadata", {}).items()
+ }
+ return LogMetadata(**data_dict)
+
+ def to_dict(self) -> Dict:
+ data_dict = asdict(self)
+ data_dict["vehicle_parameters"] = self.vehicle_parameters.to_dict()
+ data_dict["camera_metadata"] = {key.serialize(): value.to_dict() for key, value in self.camera_metadata.items()}
+ data_dict["lidar_metadata"] = {key.serialize(): value.to_dict() for key, value in self.lidar_metadata.items()}
+ return data_dict
+
@dataclass(frozen=True)
-class SceneExtractionInfo:
+class SceneExtractionMetadata:
+ initial_token: str
initial_idx: int
duration_s: float
history_s: float
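
Because the frozen LogMetadata now nests VehicleParameters and enum-keyed sensor metadata, from_dict/to_dict handle the enum (de)serialization explicitly so the whole object survives a JSON round trip. A sketch of that round trip, with placeholder field values and the converter's station-wagon parameters as a stand-in:

    import json

    metadata = LogMetadata(
        dataset="kitti360",
        split="train",
        log_name="2013_05_28_drive_0000_sync",
        location="karlsruhe",
        timestep_seconds=0.1,
        vehicle_parameters=get_kitti360_station_wagon_parameters(),
        camera_metadata={},  # PinholeCameraType -> PinholeCameraMetadata
        lidar_metadata={},   # LiDARType -> LiDARMetadata
        map_has_z=True,
        map_is_local=True,
    )
    restored = LogMetadata.from_dict(json.loads(json.dumps(metadata.to_dict())))
    assert restored == metadata
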
diff --git a/d123/datatypes/sensors/camera/pinhole_camera.py b/d123/datatypes/sensors/camera/pinhole_camera.py
index 8f7f8d44..33249d17 100644
--- a/d123/datatypes/sensors/camera/pinhole_camera.py
+++ b/d123/datatypes/sensors/camera/pinhole_camera.py
@@ -1,7 +1,6 @@
from __future__ import annotations
-import json
-from dataclasses import dataclass
+from dataclasses import asdict, dataclass
from typing import Any, Dict, Optional
import numpy as np
@@ -30,6 +29,14 @@ class PinholeCameraType(SerialIntEnum):
CAM_STEREO_R = 9
+@dataclass
+class PinholeCamera:
+
+ metadata: PinholeCameraMetadata
+ image: npt.NDArray[np.uint8]
+ extrinsic: StateSE3
+
+
class PinholeIntrinsicsIndex(IntEnum):
FX = 0
@@ -180,27 +187,22 @@ class PinholeCameraMetadata:
height: int
@classmethod
- def from_dict(cls, json_dict: Dict[str, Any]) -> PinholeCameraMetadata:
- return cls(
- camera_type=PinholeCameraType(json_dict["camera_type"]),
- intrinsics=(
- PinholeIntrinsics.from_list(json_dict["intrinsics"]) if json_dict["intrinsics"] is not None else None
- ),
- distortion=(
- PinholeDistortion.from_list(json_dict["distortion"]) if json_dict["distortion"] is not None else None
- ),
- width=int(json_dict["width"]),
- height=int(json_dict["height"]),
+ def from_dict(cls, data_dict: Dict[str, Any]) -> PinholeCameraMetadata:
+ data_dict["camera_type"] = PinholeCameraType(data_dict["camera_type"])
+ data_dict["intrinsics"] = (
+ PinholeIntrinsics.from_list(data_dict["intrinsics"]) if data_dict["intrinsics"] is not None else None
+ )
+ data_dict["distortion"] = (
+ PinholeDistortion.from_list(data_dict["distortion"]) if data_dict["distortion"] is not None else None
)
+ return PinholeCameraMetadata(**data_dict)
def to_dict(self) -> Dict[str, Any]:
- return {
- "camera_type": int(self.camera_type),
- "intrinsics": self.intrinsics.tolist() if self.intrinsics is not None else None,
- "distortion": self.distortion.tolist() if self.distortion is not None else None,
- "width": self.width,
- "height": self.height,
- }
+ data_dict = asdict(self)
+ data_dict["camera_type"] = int(self.camera_type)
+ data_dict["intrinsics"] = self.intrinsics.tolist() if self.intrinsics is not None else None
+ data_dict["distortion"] = self.distortion.tolist() if self.distortion is not None else None
+ return data_dict
@property
def aspect_ratio(self) -> float:
@@ -221,40 +223,3 @@ def fov_y(self) -> float:
"""
fov_y_rad = 2 * np.arctan(self.height / (2 * self.intrinsics.fy))
return fov_y_rad
-
-
-def camera_metadata_dict_to_json(
- camera_metadata: Dict[PinholeCameraType, PinholeCameraMetadata],
-) -> Dict[str, Dict[str, Any]]:
- """
- Converts a dictionary of CameraMetadata to a JSON-serializable format.
- :param camera_metadata: Dictionary of CameraMetadata.
- :return: JSON-serializable dictionary.
- """
- camera_metadata_dict = {
- camera_type.serialize(): metadata.to_dict() for camera_type, metadata in camera_metadata.items()
- }
- return json.dumps(camera_metadata_dict)
-
-
-def camera_metadata_dict_from_json(
- json_dict: Dict[str, Dict[str, Any]],
-) -> Dict[PinholeCameraType, PinholeCameraMetadata]:
- """
- Converts a JSON-serializable dictionary back to a dictionary of CameraMetadata.
- :param json_dict: JSON-serializable dictionary.
- :return: Dictionary of CameraMetadata.
- """
- camera_metadata_dict = json.loads(json_dict)
- return {
- PinholeCameraType.deserialize(camera_type): PinholeCameraMetadata.from_dict(metadata)
- for camera_type, metadata in camera_metadata_dict.items()
- }
-
-
-@dataclass
-class PinholeCamera:
-
- metadata: PinholeCameraMetadata
- image: npt.NDArray[np.uint8]
- extrinsic: StateSE3
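
One behavioral detail of the rewritten from_dict above: it writes the converted values back into the dictionary it receives, so the caller's dict is mutated in place. When the raw dict must stay JSON-serializable (e.g. for re-logging), pass a copy; a small defensive sketch with placeholder values:

    import copy

    raw = {"camera_type": 1, "intrinsics": None, "distortion": None, "width": 1920, "height": 1080}
    metadata = PinholeCameraMetadata.from_dict(copy.deepcopy(raw))
    assert isinstance(raw["camera_type"], int)  # raw is untouched thanks to the copy
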
diff --git a/d123/datatypes/sensors/lidar/lidar.py b/d123/datatypes/sensors/lidar/lidar.py
index 00356ee4..d32d73fd 100644
--- a/d123/datatypes/sensors/lidar/lidar.py
+++ b/d123/datatypes/sensors/lidar/lidar.py
@@ -1,8 +1,7 @@
from __future__ import annotations
-import json
from dataclasses import dataclass
-from typing import Dict, Optional, Type
+from typing import Optional, Type
import numpy as np
import numpy.typing as npt
@@ -39,40 +38,15 @@ def to_dict(self) -> dict:
}
@classmethod
- def from_dict(cls, json_dict: dict) -> LiDARMetadata:
- lidar_type = LiDARType[json_dict["lidar_type"]]
- if json_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY:
- raise ValueError(f"Unknown lidar index: {json_dict['lidar_index']}")
- lidar_index_class = LIDAR_INDEX_REGISTRY[json_dict["lidar_index"]]
- extrinsic = StateSE3.from_list(json_dict["extrinsic"]) if json_dict["extrinsic"] is not None else None
+ def from_dict(cls, data_dict: dict) -> LiDARMetadata:
+ lidar_type = LiDARType[data_dict["lidar_type"]]
+ if data_dict["lidar_index"] not in LIDAR_INDEX_REGISTRY:
+ raise ValueError(f"Unknown lidar index: {data_dict['lidar_index']}")
+ lidar_index_class = LIDAR_INDEX_REGISTRY[data_dict["lidar_index"]]
+ extrinsic = StateSE3.from_list(data_dict["extrinsic"]) if data_dict["extrinsic"] is not None else None
return cls(lidar_type=lidar_type, lidar_index=lidar_index_class, extrinsic=extrinsic)
-def lidar_metadata_dict_to_json(lidar_metadata: Dict[LiDARType, LiDARMetadata]) -> str:
- """
- Converts a dictionary of LiDARMetadata to a JSON-serializable format.
- :param lidar_metadata: Dictionary of LiDARMetadata.
- :return: JSON string.
- """
- lidar_metadata_dict = {
- lidar_type.serialize(): metadata.to_dict() for lidar_type, metadata in lidar_metadata.items()
- }
- return json.dumps(lidar_metadata_dict)
-
-
-def lidar_metadata_dict_from_json(json_str: str) -> Dict[LiDARType, LiDARMetadata]:
- """
- Converts a JSON string back to a dictionary of LiDARMetadata.
- :param json_str: JSON string.
- :return: Dictionary of LiDARMetadata.
- """
- lidar_metadata_dict = json.loads(json_str)
- return {
- LiDARType.deserialize(lidar_type): LiDARMetadata.from_dict(metadata)
- for lidar_type, metadata in lidar_metadata_dict.items()
- }
-
-
@dataclass
class LiDAR:
diff --git a/d123/datatypes/vehicle_state/vehicle_parameters.py b/d123/datatypes/vehicle_state/vehicle_parameters.py
index 0d7f3d01..5c8a57de 100644
--- a/d123/datatypes/vehicle_state/vehicle_parameters.py
+++ b/d123/datatypes/vehicle_state/vehicle_parameters.py
@@ -1,4 +1,6 @@
-from dataclasses import dataclass
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
from d123.geometry import StateSE2, StateSE3, Vector2D, Vector3D
from d123.geometry.transform.transform_se2 import translate_se2_along_body_frame
@@ -18,6 +20,13 @@ class VehicleParameters:
rear_axle_to_center_vertical: float
rear_axle_to_center_longitudinal: float
+ @classmethod
+ def from_dict(cls, data_dict: dict) -> VehicleParameters:
+ return VehicleParameters(**data_dict)
+
+ def to_dict(self) -> dict:
+ return asdict(self)
+
def get_nuplan_chrysler_pacifica_parameters() -> VehicleParameters:
# NOTE: use parameters from nuPlan dataset
diff --git a/d123/geometry/utils/rotation_utils.py b/d123/geometry/utils/rotation_utils.py
index 499b98bc..b8a8ddca 100644
--- a/d123/geometry/utils/rotation_utils.py
+++ b/d123/geometry/utils/rotation_utils.py
@@ -147,6 +147,52 @@ def get_quaternion_array_from_rotation_matrix(rotation_matrix: npt.NDArray[np.fl
return get_quaternion_array_from_rotation_matrices(rotation_matrix[None, ...])[0]
+def get_quaternion_array_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
+ assert euler_angles.ndim >= 1 and euler_angles.shape[-1] == len(EulerAnglesIndex)
+
+ # Store original shape for reshaping later
+ original_shape = euler_angles.shape[:-1]
+
+ # Flatten to 2D if needed
+ if euler_angles.ndim > 2:
+ euler_angles_ = euler_angles.reshape(-1, len(EulerAnglesIndex))
+ else:
+ euler_angles_ = euler_angles
+
+ # Extract roll, pitch, yaw
+ roll = euler_angles_[..., EulerAnglesIndex.ROLL]
+ pitch = euler_angles_[..., EulerAnglesIndex.PITCH]
+ yaw = euler_angles_[..., EulerAnglesIndex.YAW]
+
+ # Half angles
+ roll_half = roll / 2.0
+ pitch_half = pitch / 2.0
+ yaw_half = yaw / 2.0
+
+ # Compute sin/cos for half angles
+ cos_roll_half = np.cos(roll_half)
+ sin_roll_half = np.sin(roll_half)
+ cos_pitch_half = np.cos(pitch_half)
+ sin_pitch_half = np.sin(pitch_half)
+ cos_yaw_half = np.cos(yaw_half)
+ sin_yaw_half = np.sin(yaw_half)
+
+ # Compute quaternion components (ZYX intrinsic rotation order)
+ qw = cos_roll_half * cos_pitch_half * cos_yaw_half + sin_roll_half * sin_pitch_half * sin_yaw_half
+ qx = sin_roll_half * cos_pitch_half * cos_yaw_half - cos_roll_half * sin_pitch_half * sin_yaw_half
+ qy = cos_roll_half * sin_pitch_half * cos_yaw_half + sin_roll_half * cos_pitch_half * sin_yaw_half
+ qz = cos_roll_half * cos_pitch_half * sin_yaw_half - sin_roll_half * sin_pitch_half * cos_yaw_half
+
+ # Stack into quaternion array
+ quaternions = np.stack([qw, qx, qy, qz], axis=-1)
+
+ # Reshape back to original batch dimensions + (4,)
+ if len(original_shape) > 1:
+ quaternions = quaternions.reshape(original_shape + (len(QuaternionIndex),))
+
+ return normalize_quaternion_array(quaternions)
+
+
def get_rotation_matrix_from_euler_array(euler_angles: npt.NDArray[np.float64]) -> npt.NDArray[np.float64]:
assert euler_angles.ndim == 1 and euler_angles.shape[0] == len(EulerAnglesIndex)
return get_rotation_matrices_from_euler_array(euler_angles[None, ...])[0]
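
The closed-form conversion above implements the usual yaw-pitch-roll convention, q = q_z(yaw) * q_y(pitch) * q_x(roll) (ZYX intrinsic). A quick property check against SciPy, assuming EulerAnglesIndex orders the columns (ROLL, PITCH, YAW) and treating SciPy as a dev-only dependency, tolerant of the q/-q sign ambiguity:

    import numpy as np
    from scipy.spatial.transform import Rotation

    rng = np.random.default_rng(0)
    euler = rng.uniform(-np.pi, np.pi, size=(128, 3))  # columns: roll, pitch, yaw

    q_ours = get_quaternion_array_from_euler_array(euler)          # (w, x, y, z)
    q_xyzw = Rotation.from_euler("ZYX", euler[:, ::-1]).as_quat()  # SciPy returns (x, y, z, w)
    q_ref = np.concatenate([q_xyzw[:, 3:], q_xyzw[:, :3]], axis=-1)

    # q and -q encode the same rotation, so compare |<q_ours, q_ref>| with 1.
    assert np.allclose(np.abs(np.sum(q_ours * q_ref, axis=-1)), 1.0, atol=1e-9)
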
diff --git a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
index b126ba58..22743010 100644
--- a/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
+++ b/d123/script/config/dataset_conversion/default_dataset_conversion.yaml
@@ -18,5 +18,5 @@ defaults:
# - av2_sensor_dataset
- _self_
-force_map_conversion: False
+force_map_conversion: True
force_log_conversion: True
diff --git a/d123/simulation/agents/idm_agents.py b/d123/simulation/agents/idm_agents.py
index 648a8145..5227c10d 100644
--- a/d123/simulation/agents/idm_agents.py
+++ b/d123/simulation/agents/idm_agents.py
@@ -9,7 +9,7 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
from d123.datasets.maps.abstract_map import AbstractMap
from d123.datasets.scene.abstract_scene import AbstractScene
-from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
+from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.point import Point2D
from d123.geometry.polyline import PolylineSE2
@@ -68,7 +68,7 @@ def reset(
self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents]
future_box_detections = [
- scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.get_number_of_iterations())
+ scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations)
]
# TODO: refactor or move for general use
diff --git a/d123/simulation/agents/path_following.py b/d123/simulation/agents/path_following.py
index 347f7f7e..661d4178 100644
--- a/d123/simulation/agents/path_following.py
+++ b/d123/simulation/agents/path_following.py
@@ -47,7 +47,7 @@ def reset(
self._initial_target_agents = [copy.deepcopy(agent) for agent in target_agents]
future_box_detections = [
- scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.get_number_of_iterations())
+ scene.get_box_detections_at_iteration(iteration) for iteration in range(0, scene.number_of_iterations)
]
# TODO: refactor or move for general use
diff --git a/d123/simulation/agents/smart_agents.py b/d123/simulation/agents/smart_agents.py
index 4ec342b3..778b144a 100644
--- a/d123/simulation/agents/smart_agents.py
+++ b/d123/simulation/agents/smart_agents.py
@@ -8,7 +8,7 @@
from d123.common.datatypes.detection.detection import BoxDetection, BoxDetectionSE2
from d123.datasets.maps.abstract_map import AbstractMap
from d123.datasets.scene.abstract_scene import AbstractScene
-from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper, DetectionType
+from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper, DetectionType
from d123.geometry.bounding_box import BoundingBoxSE2
from d123.geometry.se import StateSE2
from d123.geometry.transform.transform_se2 import convert_relative_to_absolute_point_2d_array
diff --git a/d123/simulation/gym/demo_gym_env.py b/d123/simulation/gym/demo_gym_env.py
index 88eba93f..030c0f69 100644
--- a/d123/simulation/gym/demo_gym_env.py
+++ b/d123/simulation/gym/demo_gym_env.py
@@ -11,7 +11,7 @@
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.datasets.maps.abstract_map import AbstractMap
from d123.datasets.scene.abstract_scene import AbstractScene
-from d123.datatypes.scene.arrow.utils.conversion import EgoStateSE3
+from d123.datatypes.scene.arrow.utils.arrow_getters import EgoStateSE3
from d123.simulation.observation.abstract_observation import AbstractObservation
from d123.simulation.observation.agents_observation import AgentsObservation
@@ -59,7 +59,12 @@ def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoState,
# )
detection_observation = self._observation.reset(self._current_scene)
- return self._current_scene.map_api, self._current_ego_vehicle_state, detection_observation, self._current_scene
+ return (
+ self._current_scene.get_map_api(),
+ self._current_ego_vehicle_state,
+ detection_observation,
+ self._current_scene,
+ )
def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoState, DetectionRecording, bool]:
self._current_scene_index += 1
@@ -73,7 +78,7 @@ def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoState, DetectionReco
)
detection_observation = self._observation.step()
- is_done = self._current_scene_index == self._current_scene.get_number_of_iterations() - 1
+ is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1
return self._current_ego_vehicle_state, detection_observation, is_done
diff --git a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
index 940b1f47..ac18ffaa 100644
--- a/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
+++ b/d123/simulation/gym/environment/reward_builder/default_reward_builder.py
@@ -349,7 +349,7 @@ def _add_value_measurements(self, simulation_wrapper: SimulationWrapper, info: D
assert len(self._reward_history) > 0
current_iteration = simulation_wrapper.current_planner_input.iteration.index
- num_simulation_iterations = simulation_wrapper.scenario.get_number_of_iterations()
+ num_simulation_iterations = simulation_wrapper.scenario.number_of_iterations
remaining_time = 1 - (current_iteration / num_simulation_iterations)
remaining_progress = 1 - simulation_wrapper.route_completion
diff --git a/d123/simulation/gym/gym_env.py b/d123/simulation/gym/gym_env.py
index b4953dc3..cf9621c4 100644
--- a/d123/simulation/gym/gym_env.py
+++ b/d123/simulation/gym/gym_env.py
@@ -48,7 +48,12 @@ def reset(self, scene: Optional[AbstractScene]) -> Tuple[AbstractMap, EgoStateSE
).ego_state_se2
detection_observation = self._observation.reset(self._current_scene)
- return self._current_scene.map_api, self._current_ego_state_se2, detection_observation, self._current_scene
+ return (
+ self._current_scene.get_map_api(),
+ self._current_ego_state_se2,
+ detection_observation,
+ self._current_scene,
+ )
def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoStateSE2, DetectionRecording, bool]:
self._current_scene_index += 1
@@ -64,7 +69,7 @@ def step(self, action: npt.NDArray[np.float64]) -> Tuple[EgoStateSE2, DetectionR
)
detection_observation = self._observation.step()
- is_done = self._current_scene_index == self._current_scene.get_number_of_iterations() - 1
+ is_done = self._current_scene_index == self._current_scene.number_of_iterations - 1
return self._current_ego_state_se2, detection_observation, is_done
diff --git a/d123/simulation/metrics/sim_agents/interaction_based.py b/d123/simulation/metrics/sim_agents/interaction_based.py
index 992c837b..3f9fca21 100644
--- a/d123/simulation/metrics/sim_agents/interaction_based.py
+++ b/d123/simulation/metrics/sim_agents/interaction_based.py
@@ -3,7 +3,7 @@
import numpy as np
import numpy.typing as npt
-from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
+from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper
from d123.geometry.geometry_index import BoundingBoxSE2Index
from d123.geometry.utils.bounding_box_utils import bbse2_array_to_polygon_array
diff --git a/d123/simulation/metrics/sim_agents/sim_agents.py b/d123/simulation/metrics/sim_agents/sim_agents.py
index 225e6a80..5c9f3f63 100644
--- a/d123/simulation/metrics/sim_agents/sim_agents.py
+++ b/d123/simulation/metrics/sim_agents/sim_agents.py
@@ -54,7 +54,7 @@ def get_agent_tokens(agent_rollout: List[BoxDetection]) -> List[str]:
# TODO: Add ego vehicle state to the metrics
log_rollouts: List[BoxDetectionWrapper] = []
- for iteration in range(scene.get_number_of_iterations()):
+ for iteration in range(scene.number_of_iterations):
background_detections = scene.get_box_detections_at_iteration(iteration).box_detections
ego_detection = scene.get_ego_state_at_iteration(iteration).box_detection
log_rollouts.append(BoxDetectionWrapper(background_detections + [ego_detection]))
@@ -63,8 +63,8 @@ def get_agent_tokens(agent_rollout: List[BoxDetection]) -> List[str]:
log_agents_array, log_agents_mask = _get_log_agents_array(scene, initial_agent_tokens)
agents_array, agents_mask = _get_rollout_agents_array(agent_rollouts, initial_agent_tokens)
- log_agents_data = _extract_sim_agent_data(log_agents_array, log_agents_mask, log_rollouts, scene.map_api)
- agents_data = _extract_sim_agent_data(agents_array, agents_mask, agent_rollouts, scene.map_api)
+ log_agents_data = _extract_sim_agent_data(log_agents_array, log_agents_mask, log_rollouts, scene.get_map_api())
+ agents_data = _extract_sim_agent_data(agents_array, agents_mask, agent_rollouts, scene.get_map_api())
results: Dict[str, float] = {}
diff --git a/d123/simulation/metrics/sim_agents/utils.py b/d123/simulation/metrics/sim_agents/utils.py
index 9151ffd6..a08af428 100644
--- a/d123/simulation/metrics/sim_agents/utils.py
+++ b/d123/simulation/metrics/sim_agents/utils.py
@@ -13,15 +13,15 @@ def _get_log_agents_array(
) -> Tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
log_agents_array = np.zeros(
- (len(agent_tokens), scene.get_number_of_iterations(), len(BoundingBoxSE2Index)),
+ (len(agent_tokens), scene.number_of_iterations, len(BoundingBoxSE2Index)),
dtype=np.float64,
)
log_agents_mask = np.zeros(
- (len(agent_tokens), scene.get_number_of_iterations()),
+ (len(agent_tokens), scene.number_of_iterations),
dtype=bool,
)
- for iteration in range(scene.get_number_of_iterations()):
+ for iteration in range(scene.number_of_iterations):
box_detections = scene.get_box_detections_at_iteration(iteration)
for agent_idx, agent_token in enumerate(agent_tokens):
box_detection = box_detections.get_detection_by_track_token(agent_token)
diff --git a/d123/simulation/observation/agents_observation.py b/d123/simulation/observation/agents_observation.py
index be31ce5f..d42d2aeb 100644
--- a/d123/simulation/observation/agents_observation.py
+++ b/d123/simulation/observation/agents_observation.py
@@ -6,7 +6,7 @@
from d123.common.datatypes.recording.detection_recording import DetectionRecording
from d123.common.datatypes.vehicle_state.ego_state import EgoStateSE2
from d123.datasets.scene.abstract_scene import AbstractScene
-from d123.datatypes.scene.arrow.utils.conversion import BoxDetectionWrapper
+from d123.datatypes.scene.arrow.utils.arrow_getters import BoxDetectionWrapper
from d123.simulation.agents.abstract_agents import AbstractAgents
# from d123.simulation.agents.path_following import PathFollowingAgents
@@ -43,7 +43,7 @@ def reset(self, scene: Optional[AbstractScene]) -> DetectionRecording:
detection_types=[DetectionType.VEHICLE],
)
cars = self._agents.reset(
- map_api=self._scene.map_api,
+ map_api=self._scene.get_map_api(),
target_agents=cars,
non_target_agents=non_cars,
scene=self._scene if self._agents.requires_scene else None,
diff --git a/d123/simulation/simulation_2d.py b/d123/simulation/simulation_2d.py
index 8a4a517a..ab0a81c8 100644
--- a/d123/simulation/simulation_2d.py
+++ b/d123/simulation/simulation_2d.py
@@ -82,7 +82,7 @@ def reset(self, scene: AbstractScene) -> Tuple[PlannerInitialization, PlannerInp
# 5. Fill planner input and initialization
planner_initialization = PlannerInitialization(
route_lane_group_ids=self._scene.get_route_lane_group_ids(simulation_iteration.index),
- map_api=self._scene.map_api,
+ map_api=self._scene.get_map_api(),
)
planner_input = PlannerInput(
iteration=simulation_iteration,
diff --git a/d123/simulation/time_controller/log_time_controller.py b/d123/simulation/time_controller/log_time_controller.py
index 363e43e7..cb853418 100644
--- a/d123/simulation/time_controller/log_time_controller.py
+++ b/d123/simulation/time_controller/log_time_controller.py
@@ -41,4 +41,4 @@ def reached_end(self) -> bool:
def number_of_iterations(self) -> int:
"""Inherited, see superclass."""
- return self._scene.get_number_of_iterations()
+ return self._scene.number_of_iterations
diff --git a/d123/training/feature_builder/smart_feature_builder.py b/d123/training/feature_builder/smart_feature_builder.py
index 439a4830..f4e7e10b 100644
--- a/d123/training/feature_builder/smart_feature_builder.py
+++ b/d123/training/feature_builder/smart_feature_builder.py
@@ -69,7 +69,7 @@ def _build_map_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, np.
# create map extent polygon
map_bounding_box = BoundingBoxSE2(origin, height, width)
- map_api = scene.map_api
+ map_api = scene.get_map_api()
map_objects = map_api.query(
map_bounding_box.shapely_polygon,
layers=[
@@ -194,7 +194,7 @@ def _build_map_features(scene: AbstractScene, origin: StateSE2) -> Dict[str, np.
def _build_agent_features(scene: AbstractScene, origin: StateSE2) -> None:
iteration_indices = np.arange(
-scene.get_number_of_history_iterations(),
- scene.get_number_of_iterations(),
+ scene.number_of_iterations,
)
# print(iteration_indices[scene.get_number_of_history_iterations()])
num_steps = len(iteration_indices)
diff --git a/notebooks/gym/test_simulation_2d.ipynb b/notebooks/gym/test_simulation_2d.ipynb
index 60c22ceb..d6eadc86 100644
--- a/notebooks/gym/test_simulation_2d.ipynb
+++ b/notebooks/gym/test_simulation_2d.ipynb
@@ -294,7 +294,7 @@
") -> plt.Axes:\n",
"\n",
" sample = simulation_history.data[iteration]\n",
- " map_api = simulation_history.scene.map_api\n",
+ " map_api = simulation_history.scene.get_map_api()\n",
"\n",
" ego_state = sample.ego_state\n",
" # planner_output = sample.planner_output\n",
diff --git a/notebooks/scene_rendering.ipynb b/notebooks/scene_rendering.ipynb
index 25018fda..77735e19 100644
--- a/notebooks/scene_rendering.ipynb
+++ b/notebooks/scene_rendering.ipynb
@@ -82,7 +82,7 @@
" ego_vehicle_state = scene.get_ego_state_at_iteration(iteration)\n",
" box_detections = scene.get_box_detections_at_iteration(iteration)\n",
" traffic_light_detections = scene.get_traffic_light_detections_at_iteration(iteration)\n",
- " map_api = scene.map_api\n",
+ " map_api = scene.get_map_api()\n",
"\n",
" point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
" add_default_map_on_ax(ax, map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
diff --git a/notebooks/smarty/smart_rollout.ipynb b/notebooks/smarty/smart_rollout.ipynb
index f8f71593..d437fcfb 100644
--- a/notebooks/smarty/smart_rollout.ipynb
+++ b/notebooks/smarty/smart_rollout.ipynb
@@ -42,7 +42,7 @@
"scenes = scene_builder.get_scenes(scene_filter, worker)\n",
"scene: ArrowScene = scenes[100]\n",
"plot_scene_at_iteration(scene, iteration=0)\n",
- "print(scene.get_number_of_iterations(), scene.get_number_of_history_iterations())"
+ "print(scene.number_of_iterations, scene.get_number_of_history_iterations())"
]
},
{
diff --git a/notebooks/viz/bev_matplotlib.ipynb b/notebooks/viz/bev_matplotlib.ipynb
index 3f2e9f12..e4a8d52b 100644
--- a/notebooks/viz/bev_matplotlib.ipynb
+++ b/notebooks/viz/bev_matplotlib.ipynb
@@ -11,7 +11,7 @@
"from d123.datatypes.scene.scene_filter import SceneFilter\n",
"\n",
"from d123.common.multithreading.worker_sequential import Sequential\n",
- "from d123.datatypes.sensors.camera import CameraType"
+ "from d123.datatypes.sensors.camera.pinhole_camera import PinholeCameraType "
]
},
{
@@ -24,7 +24,7 @@
"from d123.geometry import Point2D\n",
"import numpy as np\n",
"\n",
- "import torch\n",
+ "# import torch\n",
"\n",
"from d123.geometry.polyline import Polyline2D"
]
@@ -226,9 +226,9 @@
" box_detections = scene.get_box_detections_at_iteration(iteration)\n",
"\n",
" point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
- " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
- " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
- " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n",
+ " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
"\n",
" add_box_detections_to_ax(ax, box_detections)\n",
" add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n",
@@ -252,7 +252,7 @@
" return fig, ax\n",
"\n",
"\n",
- "scene_index = 0\n",
+ "scene_index = 19\n",
"iteration = 99\n",
"fig, ax = plot_scene_at_iteration(scenes[scene_index], iteration=iteration, radius=60)\n",
"plt.show()\n",
diff --git a/notebooks/viz/bev_matplotlib_prediction.ipynb b/notebooks/viz/bev_matplotlib_prediction.ipynb
index bed72f71..68c11e2a 100644
--- a/notebooks/viz/bev_matplotlib_prediction.ipynb
+++ b/notebooks/viz/bev_matplotlib_prediction.ipynb
@@ -89,9 +89,9 @@
" box_detections = scene.get_box_detections_at_iteration(iteration)\n",
"\n",
" point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
- " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radiuss, route_lane_group_ids=None)\n",
- " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
- " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n",
+ " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radiuss, route_lane_group_ids=None)\n",
+ " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
"\n",
" add_box_future_detections_to_ax(ax, scene, iteration=iteration)\n",
" add_box_detections_to_ax(ax, box_detections)\n",
diff --git a/notebooks/viz/camera_matplotlib.ipynb b/notebooks/viz/camera_matplotlib.ipynb
index bbd6221d..2dbc9c77 100644
--- a/notebooks/viz/camera_matplotlib.ipynb
+++ b/notebooks/viz/camera_matplotlib.ipynb
@@ -329,8 +329,8 @@
" scene.open()\n",
"\n",
" if end_idx is None:\n",
- " end_idx = scene.get_number_of_iterations()\n",
- " end_idx = min(end_idx, scene.get_number_of_iterations())\n",
+ " end_idx = scene.number_of_iterations\n",
+ " end_idx = min(end_idx, scene.number_of_iterations)\n",
"\n",
" scale = 1\n",
" fig, ax = plt.subplots(1, 1, figsize=(scale * 16, scale * 9))\n",
diff --git a/notebooks/viz/video_example.ipynb b/notebooks/viz/video_example.ipynb
index f9202dd7..a6beb679 100644
--- a/notebooks/viz/video_example.ipynb
+++ b/notebooks/viz/video_example.ipynb
@@ -106,9 +106,9 @@
" box_detections = scene.get_box_detections_at_iteration(iteration)\n",
"\n",
" point_2d = ego_vehicle_state.bounding_box.center.state_se2.point_2d\n",
- " # add_debug_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
- " add_default_map_on_ax(ax, scene.map_api, point_2d, radius=radius, route_lane_group_ids=None)\n",
- " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.map_api)\n",
+ " # add_debug_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " add_default_map_on_ax(ax, scene.get_map_api(), point_2d, radius=radius, route_lane_group_ids=None)\n",
+ " # add_traffic_lights_to_ax(ax, traffic_light_detections, scene.get_map_api())\n",
"\n",
" add_box_detections_to_ax(ax, box_detections)\n",
" add_ego_vehicle_to_ax(ax, ego_vehicle_state)\n",
@@ -187,8 +187,8 @@
" scene.open()\n",
"\n",
" if end_idx is None:\n",
- " end_idx = scene.get_number_of_iterations()\n",
- " end_idx = min(end_idx, scene.get_number_of_iterations())\n",
+ " end_idx = scene.number_of_iterations\n",
+ " end_idx = min(end_idx, scene.number_of_iterations)\n",
" fig, ax = plt.subplots(1, 2, figsize=(18, 5))\n",
" gs = fig.add_gridspec(1, 2, width_ratios=[6, 1])\n",
" ax[0].set_position(gs[0].get_position(fig))\n",
diff --git a/test_viser.py b/test_viser.py
index b9fdeec9..d1449ec1 100644
--- a/test_viser.py
+++ b/test_viser.py
@@ -8,21 +8,20 @@
if __name__ == "__main__":
- splits = ["nuplan_private_test"]
+ # splits = ["nuplan_private_test"]
# splits = ["carla"]
- # splits = ["wopd_train"]
+ splits = ["wopd_train"]
# splits = ["av2-sensor-mini_train"]
log_names = None
-
scene_tokens = None
scene_filter = SceneFilter(
split_names=splits,
log_names=log_names,
scene_tokens=scene_tokens,
- duration_s=18,
- history_s=0.5,
- timestamp_threshold_s=10,
+ duration_s=None,
+ history_s=None,
+ timestamp_threshold_s=None,
shuffle=True,
camera_types=[PinholeCameraType.CAM_F0],
)
From 3dd8919f69f8f167f08a65ff3dda30ba25b0d427 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Mon, 6 Oct 2025 22:00:48 +0200
Subject: [PATCH 057/145] Small type hint fix.
---
d123/datasets/wopd/wopd_data_converter.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/d123/datasets/wopd/wopd_data_converter.py b/d123/datasets/wopd/wopd_data_converter.py
index 4481a532..ea418d57 100644
--- a/d123/datasets/wopd/wopd_data_converter.py
+++ b/d123/datasets/wopd/wopd_data_converter.py
@@ -3,12 +3,11 @@
import os
from functools import partial
from pathlib import Path
-from typing import Any, Dict, Final, List, Tuple, Union
+from typing import Any, Dict, Final, List, Optional, Tuple, Union
import numpy as np
import numpy.typing as npt
import pyarrow as pa
-from pyparsing import Optional
from d123.common.multithreading.worker_utils import WorkerPool, worker_map
from d123.common.utils.dependencies import check_dependencies
From 44e83b459b78cb8388215a633278d7eec188dbb9 Mon Sep 17 00:00:00 2001
From: Daniel Dauner
Date: Tue, 7 Oct 2025 16:22:11 +0200
Subject: [PATCH 058/145] Add togo test.
---
README.md | 6 +++++-
assets/123D_logo_v1.png | Bin 0 -> 897016 bytes
d123/__init__.py | 12 ++++++++++++
3 files changed, 17 insertions(+), 1 deletion(-)
create mode 100644 assets/123D_logo_v1.png
diff --git a/README.md b/README.md
index ee04aedf..95948bd8 100644
--- a/README.md
+++ b/README.md
@@ -1 +1,5 @@
-# d123
+
+
+123D: One Interface for 2D and 3D Driving Data
+
+
diff --git a/assets/123D_logo_v1.png b/assets/123D_logo_v1.png
new file mode 100644
index 0000000000000000000000000000000000000000..5173ea3a89c97a2ace0c07a23dfb01826a5317a1
GIT binary patch
literal 897016
[897016 bytes of base85-encoded binary patch data for assets/123D_logo_v1.png omitted]
zGnzd}4YUL13}XI`#Ew1!WT(`iHBh1k@cAT1AhzV>>+S{vq)i7UGAL!nr+AzbAD#ic
zoS$o`?S=#VvH3Y{37XEIrE_RigTIG63~dv|*q*y`1wNnmBcrKCq9em0JIP?GNWp8-
zRkf@t$nRb36Z_U=y%=vWsz0IM2VQxmZ>fC&E}gmoJC+?|?3%`fKur5aj7CUGwsDDn
z-K_m^`?}f*YWg64D$|3)2Qc%~pA79$LCEYM3x0uoK@9nh!+Vu25UP-+c~mY9JpOjP
zF1Fg;#SOa0breZuvD2YSSzlT10Zw^RgZ4qvLgHoeq9p{FU?l
z!XI!UO1-Zx>vR=CTwPo(nJv7qd+c5kyUSkqLIzc!ie`vNA6>Mg*-TKBII{F_21`Z1
zk#Cu8W{SkXf~o&3@?{eFHEcf&oZVJp9S)V5@GsRe=_Vu67#K1r)LiUzdC&7
z7kP}w7l7qPSt~%7uc{iL7M!T*zRjS3pTcUdT+
zYz4=^f%mm$iNOX%3e~JmF8O*pJVZYu5&FNHDWc}IrPcj{yZ2%3%>CvvDiW>I8$z8B
z{!A?VdIMa)d|L&3#>2(c8FEIYFd8DX%VW4q)OfYiz(DMAW3kuW+ja!tzA3a2i
z!$QW6j(f^Y%VIPW(!WbDQ?lrnBH}IF!Yv)FfDMR?vkUZy=WeRkFt)(#74vN1Rz0ia
zXY>HFI7#pdzSjdKS`liPP+&oFoMJ2H?!JN0ATG~ReYAQ~18jwW&RQMTrbP)wZ6nh$PK6I1vdS;_>d9~s@kDFMm|XtxxN`R%Ox(UtET$yw
zhqB1kON2|;Z@}N@%oj<@(@_mWp+|C}%$F<{YE7%MgvyOiz~;M(UFiN?tdDVCCYG=+
zO&cO;V}uA1E$c(uPL?OXRL;_h{!c&bfx>$aL^gJ%Yoc3H+JKgf$Gtm+F!%GdjHU|7
zdNv6>eGKj_bQYwuDjAwj<tq*D6=sBEm}RiQM1_eeT62=9er$ct(O
zfnNLtTAXWr>SjS9Ox(dI1&tcihb*Hj5Mw?jHyeCw@sG*hHf{$xb5=Dx*R~bZ#z}?<
zfBIo2T)TBkBq>i%7~Ef5EJ~?WyjN*OYlK+br;g|K==6q4Efq_@*#WdaLWKUWW<1g2
zZE1D?=GD8fY5qZxjos
zoWF7b7Oq_^l63kB8t9px4YboAs+6e4@Z*B>#4w09yb^%+L9gn0UgLcNyUe71YbPq26UI!ncEqIqC!
z^e*|s<+}6{=qD33t~G!fcvI8@jm&6ptrEW7z87e3ga|L?i$%cO(*vHvD0w1W
zxOE4n>^>lpl%&1gTw#>dZ<4fH
z7r7s)f*Y6a;3VJ>V;9n-F3_<^PXkM<`zyZRjSF7fjp}K9o|NzG;shP~@wVK&dIuJM
zv++sQY|iWXd^>P;GZ-h{#rsp8(rs>?u2>ElV3vxSHEKeB61OVb+ooN|_rm^D{28QB
z+7{eqm?(0sq2LH2%v`TNB3{qLZb17-MEKXNZ_!t-QM)!WuF=>75y-jHf>^735#cZ4VN#RVJyQ^hX8HV
zE)7KDiC4JP#fqx7SkR_z7gv76qreIJBf!*N?Gd~mnxPW1MMb1XghG=5Kapft`aZs7
zzm(2{t>fzhgF2C631z#DL&B8Yq6?P7ygL7P)q@I&2hWcrc0#K7yR;xJK=r-w}AB9~zw3~A
zv59c`%njJN@))DpizM}I3blOs`+fSyD=`SteS29O>igC6g0$YPWjYb~%9vb2*!*`;!q-9ZbjQcOsx%7|^x1NV1sr^7Mip
z?YpwPqUd->cQcy3NYOdEtpIPd>>gac~
z_8x}Q(%_szX}i0+mP*(D-rg|0dly!g_|4qMSF>0F8xcBUR__0e+jrp4(`MPov2@j%
zhlN5;YLZ;IEfTuR#A=L_)!SR_f9K(?-JP7-?JH(moxXML9;~0kEn$HT{h@u&=u-2d
zV`k;m{iR>y325pJ7U1gc49R)4E>H|t&fkRPc+{*TfFk}`m|PiF79Am5^Q=%Bm>{>N
zR}x#BlC!PGPhC0-n+|Pb^^&6#fFj_P1_ojnXE{3FEVNjt21`-@zL@om(!+ODx49zm
zBX_kCJ%`9mq7{>OMWXn2AB`Ce#LO_-e^A2dNMbo_(?^#59Ue+iQu>Z~L%F453EnUl
z&Yob+qv&?7wP+!&^bnCgiWPo@Uz?fVe8gz>BKgC{;e+KQvw24*XuNuA04MzQu;=WF
zZWUMuQbG-@26idvpSVd|1e!?Dgu)tlrB|+D0`ljzTj7DqQQG4MJ&T*vgQUGAf15oQ
zE?m84O_QxII{{-?SU3slKkzzn)27v&D}M!40lCq>EcV3B)8%$Tb`!%3PAp?>dgY{D
zP;iH@`1gG!4Xl~86s_*};;Z{-4qbsmo6lI=oF|rV*Cz^GJ-A!a4}V^P7JKzB7{QI|
zKw{VCa(g=Cb)iYGhH9Vzyw-G6vln;UMllc>)KF>fr>&i17?cm`UV6`M^3!kEI+?X;
zGe*QadFeEx*^2~t`$IZL#S`J+iNmmNr(@g@MLAU^ot4@J!h%BO?u^EJEy6@BnouBt
zc4EI_BGPZso`WZxjwF_~CVlV51vs^78mm$tXR|376I>;bb%Kt66@lEi8nzoQl1i2K
zZYT;DFWTjNhN+=W<907^A5k`Vcyf*wPkP2wD+Gt=r_LQuMW{_9s%^5Z(
zR$`#LUFR=}wf+C{S#7f~z~D|9K(0{MKs~?OFr;%Pt8(n>b(pvR2&1V$+?<_&j+zzW
zHl9HB$EkeZmYA<1O%{vXRQ^#!zBG7K>$t3_6q_+58V|lxH@CQZs{q!{
z;77sMY2XW){bEJ2rAqf2Yh2x5_8p&Omx|F73ZWYq;N*&xID7OmteLZm(NrOQUm?#I
z189qdK_yj|@E-PzEp+rr%~9IftGBMh3Tbn7^YA7R7uCipIRm@(VdW_>-s}xOGMWmc
zS8@--0S;fz`A!AvYQcl7O2cyp%YCJJgxO+YvQ|8cL^h2LO&Nsy53CzV$dWAKM`%yQ
z{d+8{FYc*uLi37BSU&fApx|{PgrX~#tCw|_3R8WGz%2qzUS@y$SCM3QI&UbA9}V!P
zfuaf3N&`5-^u!L25j!31;|1AG!bOtZX^Jf&0Pala~k|p~d2%
z11uI4r=z|%KXvM}xeMUTrOTpt<>?87Em|yojn|8P$dwDOzznhLZrHrn#3nksI^9dj
zYf2v;Wv~h(Vbi2S1ezGo=4fld((3-fO=sZLem2pzmaU;Y9WyIiQYQatxnZ@}P|*fL
zlh_bwnH(;xV!kcu;D8`VX%^3_Zr!&7wjEHrc)$cyF{Ep9M+k2aCiiMM
z-g7_uyS@M62NQ7MP!y_{EngHZ?WYctYub(9yKxatZk)>a&{BQ7E>GXl!zw)mdHC=F
znk)XuN>MaBifU&o)N9!Zyz0@zS`3S4eg)UBUMwcqnQGi37Fxx3kh$dJHHwmn8n0Fw
zcn*6TAoh4925fLrc)G#9i&tP3nk+=1;0#Sdf<;oP)4_EDpkD^Rmk(UJ0!t2`C^e7L
z7Nme3W0UyO*gZTK_y49$FD(n3$~6lOfu8LwnkstO#IR~q8j0O#Aa*5vK+78LwMw@s
zN32&;nxfUMnYNc@*RJaYT?VyfG;0t`%lgZ|Q)@!-t?LP~U4H)fn%Z`B_gUDo?4Su%
z`rCUf-;)LgXP@I{}0O#2s{##4Rl{kwGSGTgXzvs8b30#;u$`2swx
zjy!X-x$}xElG$Qp&u3(=w0iUlBGQ`TEBMEjF=}9-iglk1qI{)Lc@7!a*?`RP*(Eb&Mbg6Xln?6aA_-oH0|zjOuM
zfAGM}`A=&$L#IYz((B!vZF8c3tETYjhV5|t+6^<0e=S%9FXiVz!$8}gdo!~V67S$f
ziuErCnbAY-5DdqSrQm;smQ3sd_4RDoorf#l8b5rr9O5$BC)i#wMfE8f
zAVmc{vg?eLf^O5
zFQ%IXlykzv%^jR2_W@g*t7E#bnnY(_9A4dnD*r#7@L~Tt`L#NeVAB*QX&mv^T6Ggp4VcSH|g(i_YUt@ajx~L
z)=8NVi8etAhT_FyD}YkIMysv{=mBXYc8`31==dX5aQ5bHn7xlrz~h=WhQ#LFE^V#6
zh@e2olUDP0;s%b@l2yc7zLv^6FnOA#;>u`bvB+BxcWsV~nN>$$O!l%*qW3FUv%YR~Ze^gr050>z#s|2^22jJbd&J
z=4|Af9=vOLK~9?OC*aW$L%ln4iwZ@=TeWAM@(ZJSbBXc>DrR`+-rZ7JP@ZNrp?r!S
zNLC$bGC?h-xx^Iu$8J8#DJ$^An^5&;il2U*2B(j4b8>KeAGA&c+TE=hc+<;~^KM?h
z0*j`9X*X|?LC%eH8x>7Xi4PUA5o}#P4xm
z-xixx)2YUTw1F+@-SuP$Z2xaq!0H$Q>t^i-ihw6V?&wtT@OrvPv5uv*X?0)B+c@ta
z+_=o;iTSpqI}VM;ouj-a9S!9O=#Uo!bwl{`fpRD`uCF8HX`bb$0lcRED1Efpg238!
zA+@>v6K|2{>OJe>+|^>M0_F9=iGdv1POMX!bUsExmU+y;U>LN6TO|bg)>S&$QrWgj
z&MS&07pKar6X#AHGeoMhr!9{#lGn5^|Btbt2|;+;NJg^?AcGAzIDPN{VOv*PT2I8%~{NG!=*wj?!L{EEW&&o$$Ytr>bCIB9Eq_!O*LN
zMTHLHo5dR4onAswDH(>y?~svrAG*
zs+qoS9?K$Em_7=EjgiG8t6dky``Cq(u;b_+Mza@Luw|+8Ns7H~sv`AokpF8w{%sID
z)I25$noEZWi}|*u&zv{}>z2(nS0`5`I;yGHv#*+I_9Rv>o^FUt7u>cNc+_rS4_j83
zBIwbi;_%W>%=Akbo^j=ZDUg{%)_?nf0#{JT?SK3?Tl}LJ#S8fNU-nDT94d(h{zGgh
zEowg3wv|Y-JN?~`y>R^=pA5f@0Vh;$1b+)x^XIPH$d0=C3x?eOqBnOXw7WCNAg5U*
ze1iKb_FlMbW;c1+N_e$TKG4A5Q7~apf0({%4bTKe0J2!jswVp?CJ6n;75EwtGX}R(
zY6&N6!PWD(L@|&vDg|o!s1ws6FB-o2-6FVf@~T0m_Ca9K4KvN&M8@D~IIyvJ
z0zvyLrhmK&UjIu!ds!RmQwZ_N|F;ybU%oBoy@uE5Bb3V@ezAm3R~ilAwWOIlb8D+z
z%Lg)}4Ukz*W310=$aX
z?T^^+;zkIWI$3ozfY&({d&8W)(;`BkNr0cSPTt3X@E4o;>aU--7YxiKBZ)eQiHv}b
zv8`e8=B;L?D~_Io?Po7QOi*1jO+|uSlEx*+!b`Ijo7vHT|F;t-!)I@)S;R0i77~qu
z>laxprYv72P$3o(au}W=602Go@73%y^J#cm|4X6}GNZ*_$F;q1^i
zb31HJxOuoh*6<|w{`X5o7BtfyjDKH~
zDd2?I25Zxyfx*zOO){+9vc_C>)4^?U{NhOns}o|T*_v2|u3HpnLxjAf?$9Pe?V_E?
zzpABaZ_U`hv9%=X;uZY+gxsG9CjR{M^Z#?Fj=-TkTSStQG~Ik@(t*ZwO4Gg_>+R;h
ze)$}@h8sylaB=qm4?p{FLXjt>$^D6}AR^Fyjtwj3i6pzyF0O8n)o--SBM=EgWunHb
zqz3rTHSA*Z05vN@CT-CM?bA1In$V)(=X0{a6Hk{?4YdD2hTp8P|GAfW65g#zxM89~
zmV}*0_$r&Fy7tiMBq(Rx`y#>r!}s13mcgXbl@sNwIslJ44vAJanIM&H`SEbkNlc=8
zx<8u;J^{5vl9Dv7?tA%qFkXwk*aqZ&r4q;jA_9flp7F_Qk)$k5RvnsvN$?Kdnx-*;
zl%>(iq5-^yWJY^UY_F%A2Xu|i5J`5YkE1Kn*26m)pY}NTk7&@4(d-KMkvsc(cH}(H-xX^vm^fLJt325LchE)rH
zC`J^fGJ6a%tmrSDP2%l?Y5)3zfkDCTD{$#x@l4lHzz#xq+9;sC8Y0N2PKkN8q`UPQ
z2_Bwaa#i{W7)C8}A&pmh4H({_vv2}%@BRa29s6t(|2Vk0y1Ntb;8xn+0s4%B{FG6zfQ0(<8|+LA#7cAgwgCpqEbUa*3;sNo8ij2
z;v=8R&tVetopb3Ubm~LU?
z^x*FOREj9#weRQw15a|hu{Sy%xp3`2eTeZ+Z;|ZYq&WXuQ&E0H4WC*op7Oj*w;H
z%dOm9NzAt%d<>F?;D3CNFM<>1)Fmwq#IhKOj<
zWMZv_n}`*90`kM>|5IWvSu3ucaLk}3GUIvN6C$b*9jB#;-X%7ylo`2$Ujk=*K}hgE
zzDQ_MRzua+z$@52wsiSd9DrZF%L9MeqJqC~+`+2R?KCDQo6%Gvf1NSgBD=Z!kHFz8
z*DU7QOMEreusU0`ufkrwPpVHAi{8eQplH;X|D<}AF%R#VHrQ_FtAP6i?u0Z8P}UtZ
zGQn~j$csRG9`avoFOrm|XMVaGPxCV_rd>FG4fd`*rBqg%P7&Z2uj<4dB$D)=GabcIz6>DC8M|m|vRo{)<;2
zc-e*?0!5qmD~DdEp|Exxf-Uz#-%(>(xCwqKpm=gCQv-NF>3J8@Irk
zU3(Z!HS)IQjTZloBRTQit6FS%@cRV{Wh)(>hqqB`uGsU_2{?7&GV5E%ezBH2F{*5
zVzsDgU;II?^AHIeMB@2fi{a+^Ln298IwI{k1FxB1e`KK9Lnt~f3&RIXz+NO$o<^&V
z2FM%_os$RqyfBUU9q|B1(((`1?u{$GZ~UPD8R$5ySJHUXfs7kaMh(o`un^BrH+;u`
zFP4-?smu@R4%(u&tXU+MSC*E8JK%%{md#<}pDJtHx}~spyJ5ykqlI6Jl^@{^b!^qY
zM3S;}?eI9L-7uE%TeElu(9u?^Ah=-@h&LXK&;?&8ZB@z8d@7~}@VU5&E&ETgb$4;s
z_D5^YivnWSQ`6^e-iDd`4zY5vO&UQ`R5M0Xh1|M(7rt7^)+vbf8IeDoZTl$(nQ#Qe
z2M0pCum+6v@@+d{)egP`m=#cfP*`ybn=AeupIP~xt3X=0uy_}xvc7bCu8q6Omh9hf
z8jcHRt`ctwDA6E7?SRSDX4=p&*Jv3a^9Y-N%aa^aeOrH>U7aAc7tPO#;l`EQu=sn!
zYG82zJ5s$~GEB(G%JilkEOpiL(EvSzW~hP2!u;ecGp-^*1IW1>`54frmN!PKqq&S)
z_zr`9NkFUF>^L8ah`00DUa8oV(G3T-8bW|~i_K6T8L+q&ck^m3Gx-7(Q>#kqHg0@b
zvVEo8oqb4{ClcAm0W+rJ-yN}lqmG4Y8l3>a;Y~$RK<-f1>=OUs^Z#Y_5uuO3f8u~_
z0sd|hGf!`s0e7#mF^!mQS6U8`L4BZ*+bW^=@YgM|d&?40)2f^XKEWQb8NuY8`{BSP
zzKgXqz5Zg$E_6~QgI+OEq&P>xH;b0SwOaTrvzu>8
z3`#hPi9Rw5#X5V}oP={ntbg`I9&(x{7am@&j6CfxD=lvHV1;ZzqT5A6v$mmf&B2ea
zt4!B;1=Ijut3+&8hS;>Erg0e5LG3fR65Wk1T)!lWfqtFy!2>-`RKl9=>*2t0_K73*
zpW`2WE7%8X7DmUj2IMnk$wjPl>vU^aRQs~DTb?Er68s*=+U(k}2?b_e#|m@BN}E-+#0}pnoPjI5_>`W5Pts?
zn=^~q7Sp#13gNr$e5I#hU_HoAPOwWY1>-x<`GsX$R#i
z2a2+n9>E7t>-Ow}rCYWsmnAZM)ADgp6u^SLcf~rDBs~G!=_0nZ>&Q0lD$kHOyzLwu
z+0NaJlCxVtSo1(ptguRTVkdB7Yy!sbUj}!v4YO1Xc{ma-KE-6K^>OQ4|T=UWAZa|lY3-ma~|O7X#069L9x#OZ3=+V9_1o-+hn!U*XXYV`Usydd&N9j!k>{zf@?4qC|_TEckNqLDW
zrspN^CGX{X-~YYu%lGoqO)Rl
z@XpNHo!Omp&Ysxl#(iO_SFsTjGv98+_Eis5YE6RLUiF|`-@zBiG1V`7MCYGxdTsGgR;IF+cJpR-ypcFHq={2%aA9Fk^7A$Wn@b
z9YReBgM6gs6X*W>`LTF4GcO;0j^)uj|1(*Y}3ut(!9uN6wvxtq0k7ja^qkykU8b
zMOF4#yn%Lc5p6T{$*#^dYEpQ<8n}Gs7Q`Lmp{l)xwu9DPdHbqH+aFRwzTGQ}%R|Oz
z0+T(fbkugl${69Ye$?I58G4T7ox&^UZbH;H?qSrg3AgOR`!)XAezGVxWXzSB23_ea
z@HZ?G>$^<}7^}RHYY%Wwb4{E?ojz=`ay^SSX*?CA;}er2{8rF_o-HQaePS=9XVDy0
z@sN$XR_#8)J&?Zjn?i>s+$&hT1mg->z`{WvImrITl6BZaNeS@g(RAS&G4b)JFt~G@
zydCO6UbLN`mkSYiB3?RpDbbDA{WeczOsNIKa3WFUTUCknxhtH0bJQ0em;NVy$rMw+IdosZh(u|J
zrDw*B1XpKzeAL6K_)8FRlDnNz%L=T6SXg>F5=HF`aL^-rRK)9gaPnBtM^S`#7cK<~
z)3Ozi$KsJeL9+Oc$e1QGD?5$t4^2%zCzjLQ(RI5&1CU5tK#f%n-*pr9gcvPs!B78GeFiK`3k6L?i
zmI?AQQz7w?Z9FbMy{3Sp8+Qk8`0X3ECGWdW??xAiFN_*GXsTS^veNyAO@ew2H81#!
z*yz+Qt1KgZm9)UySZdZAet*xw2X>HukWRmIa&UmBMh&-_0lr+b-jZzgUfzFKKN1sW
z;Q9|$M~I7Z32)==31iywj_~PA39w;bq?}YKO-F7!+S|)A{~O=g;@V<4ZDpZf!Gd^*
zg&F!p7bgoxHj7EhgeRR(ftdZgqu-@}E9lU>d70#tCGncC_Q=WU{bJ80KCAa+UT$_i
z><>9B=ha1HSQ&I3#Glc)qls{QujT`J^^x%>nQ73K#sY6)Nm%^2
zEj$%;>Y2QSD{UvM8pu;|4?XD;Sc-F}{^=Dl9qmvhpU@{>--27>is0t#3IjU!6w&4{vDep~GVMQbYtjO6|73m97$A9vDXnOFX3=~;t+TTJ{1
z^K1M`5wFw1c=rm(yR8=N9Gsz9z$_+|ygXr>)P-D;y~g5^+Y-=VVvfW!$ZBW-?ARQ_
zYONHjb`}q|TN`s;WHEeS!c{nVBbo6Y9Xt@~d)8q*Rm9EIJFs@|epy@umfcd%ofaV7
z5EK7=_$1K6a$CUbfju>AN#0w6A2l0#ESlD@2h%NkET-F{j<%4AgC7cwc~O+@;^CMp
zhwF&ft-*IM!gEyUlO@2kR|g7|TJ%!PbvP2rmiUl@jrgou`T#A!Ygu9m!k6N1YB>cW
zOE*4fwx8yJx>a9<>6U9rz{tn@fLkqR`O=$szcnLZIgvq^uLba)&|vQ=QLB%4L!e!s
zR6=t4ZJ-!>5xTVL1b(eMi7efCN?Iyx#)FI{AXnv9iAPa1^g;MWIH14`G-O<1N<_c0Ck|S5
zIGre?IDGK9*_un1I^lF;_0a<~)?u(6%e$9x1Ol(6Bl(rUg6uoj;o7lHB1;+GG+-v!
zJJGkd2tx2MyaFrFnJbqr9M4yk1uPX2Kmf5q61Xqis)V
z`%_!N>iM_;S{}hjyFCfIeGq3olANqOIJ`wOAQ7^Z9mTDAp?v_UDuQEs&RYu6DoS7_
z=IY@D!)N!FbGJYSpXHNNR7$$}f}yum#R62y3W|k#4G9*eHo5Ou%|KMlU=6-5YOChJ
zGh28nR_s^;faFJ}qzx
zQTillhiY;vgRb%xz;_@HEAfL^5n4R4=xB_{D#T;aoe_4CPu-02?*(m}@*a#Pqm_&t
ziecKSnFXM3V@L@i=i{{$@j4v=PN)9a0$vU50S9aB^Ebf^H%S
zIw1Xs#H(~9j!0=v`&F@ExF2d>A89{S@wzqm?!_lH?fWCI53)v7sq)bNt$WvNJQGp)
zerbZD?hE`2{_+P08R5%Lp$ojYj!>zJqDeG(b{7qA?bpQ>OOv!r3|hJE=wvVFc^mJ0
z4Vh`s6=MOs6}3?FTv5BXhZh8OQXA7l3ler53m4TuvxZHefA>Jf(@SLKWW$=>8)Px`
zp;{(0O*Fojw-H}xqUo0wg3uxW5pJd3208&xgdVNCKx40lBFn%_us}xET?jk5PfkJZ
zB+bWNOr&WXO&9UzMIufe7c+!!VO|U~Uj;H~>?|gRwt+elvBS)Zk39j0BQ&nD-B?ck
z^qX@OA()0@EPg8D;8^$OMYtKWTVyH2eR1V>O*%ZgS#WmYt`U^?
zS8RBSy{CHg8F?$61^$U8B6pk_=0_cabZP%sa#zS*ofjvLHRn-{{I)v`Zr-^gi~Stm
z!F-u%qVZZ9;$N6(`n`EK3s#&wE3%Z~O=`Krl#Xqfyz^I+VA<{mHYF@iS&28NPqyT?
z_!qvjc4d`S&gq{yDHJJf}iT^lfg2R36|$5|FG$gW#Uc-D7^pvinf
z4`mT!sI@YUe9A5G0!m~-tCR33ZF@V-Kv0+BEk|}~RyofYF;%nEg)SwSIV5Z&+|eF{
z_A!17@*W`jQ@qY*g4d&4MTD)mn1T*j5MgwWAtGtu53s6)kzcs|2-yA%yISRZK
za!QrtrM}(3(T?}&Klp7W#@L2HH{$N6w7SyXjc$0
zslC#f?9e6J8UN|%sX+arYGrsvD$dIOc3752quF&b^FI9s{<;)%4J<7TDK2ek`N6aN
zSW@~HhJ3S^@l+9$UKmig=d2{}Er~YB3a|ja0~CYzim2Md#T|xp86dKB_c@@;FF9wbumk9pwb1VrOI5Q{&X#cSl5NK8&DWaXjk9mm4N%h1rX
zK6G!%-zAp7AeaTnhFhX{F&+|E@sEuS&FZ!*`WVo@
zvJ(7+@1DkeA*G=Y1-Z7r5&212kF_!9M3yqVF&@_Os2`2Ri{SE&o3MC$shgCdjAg*y
zw7k&bpV(U{Jcof>vQ2hp~7WAJI~%_M}bJ_a|h
zq%xig!lzYTOZFaub?>7>z9B=G1)jp(jYJ9KdJhL@N6kumF-e*6Pz-mW)!HIV9Wnu&
z9M!$+Bzs3>C|pavE~+7XKE8VyGP4-^Q;}bIBHsN)6tvPj^)5NTi(pib!8V(iDAH1W
zu<(=nAq7{%OYkM$A5DWPr73wpfdVK{?TDCZ&$g9##HXgs6_Z$v%d_va$~k=VGN5gr
zEa?2F6^j0id063TU4<>ct!@jb-*GS#O{ezJ%6C=Z-M|O>4V@&5^T&@skjymb>SqCb
zKWRq)J+WrA24Q~WQJ~Z0mf>;LA~bJMAI1dui!7!1SL-zKvGlHeQ3fR+W7;e+J%x19
z>3z0<7y5Js7iq8850|Zk?A!;on^qU6Nxix-V@R-^zfyWBupOZp5$2t!X;<@J|7mJF
z`r*Hwf_y0qY5HrymS)UlyIDokD(j6uACU7>4`-3gyidQ3zaHYX3D5V}lrMDYNim5M
zJa|TDCWpec)_oVjc&dmgFKfJKB$_-k22_v*@Es&K>YqjRw2je7>?ErM>XdIdxQ&T)
z#(~77!Q&WD1+jE1+rf}TX5$~NsVf!WcaPjCY{A5?<9iBE#M^wBdsv(t9AU8EfKtgV
zZKBgLo}cqrfYRoydg(SC;?D%ufOc5>xshA-7dLo=T9x1Yv;a&ZMp)M{A
z5@GA=pJD&*bs~%KNPNJy%ghJSm?s9)uzjGC<02p}@zjH|T6Nub;4_+hA&bk>*V1Iy
zFeW_qvZayuTbhIhqq}(!BCwuF~P>60gGU
zBgf^$A^6VmX=pixKx10!;4v3?LTVap!jt@L0rlNnVOCe(QNDIN1r}%*n=4iShUGOD
z{|`G7mCa)@1uNc3ti1o&HZ^K_yNWDbct%P#L~V>?G8+1NLEu>4sb&du5<7p3hP32A
zJbHqtPx1MqP7VXSCsf&&L}s13?!QldaRB%aZ!fap+~Fihj7?@dy+r4rW>B|DEhc8$
z;zO3o&(c^!wCUypodR0OStp|nXz*)5#aQ4`RNGoqJ+kLupztS^5Pkkc;lgK8=GXxv
zp^kfPk)<2oe>@7}5;UKLpYh$hRz~40^n*^vTefE{#9rmH2X+bsckLR~GOBubk{`LC
zeGahpbbm_Ku$88Fu^0ui>B7A>3W2JDot>bO)7jm-nUrA3Q}I76QQ3%h<>YjLg)N|V
zlg{APihGX_hHU~0=TZgDT6Ki(0YhbRqwyoqO=cQ&^|JuJ=rpzdPqAh+yDy(2=ZWDy
zGqtniVm4hkop^_3k!3J9wS2J{nnJp$PGA
zRK&f4d$9ideRA3~WPB?#@6*rWZ$0tagy;L!aCCNnsTOAU!|(tLl~AK*4HzhSEXdok
z0mHX!0aI|GBmvEq8y*YEDCI%oH=N!z_6|DW;_Pv~M<`b>@6B2h`(o3FPgbsH@#f{S
z%mQRs^PQm3{3KD>j^p7XS%{D5u4!jAX$(e@1!zBE38?QsPvR9zKCsosPQq2py+@Ql
z+sfwSh|m@=Wc+MPGw!0QXe2g^jI!WZA9Am-`dmM`4KkB2l|@Xc)3ykj!_rcxYyFb>
zjHj2F@YpMsL>M0dOIE3#JR0<~FhvS~5@F8h5#Z+REV7j0Dd`!oB5bcLFB+e9_MXWJ
zEYXEaBJck9UH-AG9V3k%Iz8odO0Dd<>p3Fw$@dx=*e
zB$i_|D()#N?>4k$4bO(s%xK4Fr_59k3y_(f0|(Z{it3=APaPPj-Ia)?(L+RRI0;Gd
znu)h>@EOzu6Qe!oX1ylr!^0kaA94S%hc?}tK&QUG;$=7)ehHEj(-}`M(PMZ^s8gTs
z`nTc7{g9caajxi91w970g%+I}$#R}VTWFYaB7;_Lfw3sjPn0^yuRnOX*AZE|@$=W>
zfliMTA-HFMXi~qi$kL6+Ts#ZWc+#;1tMT3A?n)_vB@**cXY_0F8hI>4=-9L^v~1X1
zWEpq~7NFAuk6qMEj;uplpDQ67yHwf_*;33k(3CSHEd@UNf6qZfpB5&T(qAupkSWZn
z82A@fP%|xBOxohu|G^99X_?<~8#3}(be#Cl;?i5q_ihsiwR}3U5G&fEclw;SG@)MB
zD(Q&(NVoIzhqEW5L=lhTN8lf5s(6uU;MLCpc#rSlz5Xw(*+xsXqmZZ6jJxo!1vCax
z4^d}gEzRzKqC?YrnDMa0S&$jQtT)xeaO
zg23Lvj`36yw4JO3RDdrfo{4-Mk!B`>e#g!Utl=sgM@_pXojMS2PIB+w@33dZX_)w0
zA2UxcLY|v`Q#-)MuMU|-(O`nYpeD}kZ|12&&>I8uUS27Rb3)=J{Ch`a8o13Z@Rq2d
zCN6cF*k_c;Qi88NuvsIAwv$y2i#BOI6{JGXcYD62WvmwpYfu>~x>gZ(@9fj1`1RjT>Mom=q2H*c}C9lE#c0`uQ4
zQ$O;7Wx9GZ38ugOB;4a0Z|uhBY48IxmyiJM^etX~R!(A9(+*lUY5}ycy(DmRb}i{L
zdX$~;pj3O7CMQ3)aQj-+4bAHMKyBAr?7B?2;E@RwPF5V)*R;o-yoxVpR&F-j&CY~?
zcDlJa8z
zFP263VB&sZKf+)0lb9`^QTsyOtaa(4f;WaP@A^!mYNiT>z7x
zcpW_Ic$ZCGF{yh8je@2v+so@Vp2in|?02)6YzDh}S%7A4Y!h@{HajiDXRfH%Shl>T
z)c=Rb4E!^<%JcR`+iY<0bkEMP;K(UR6+9NSUS!S~TCiBunKTtn#V>g*#^TOgtHL8h
zaqdVwg@50P%yP#6F6$dK%g=ltSDnhDm)WI#DgG
z$K9XgL*r)P8Dgq|)>KFX3sUic0zLCk#xwZG9NAQ-vx(;vh%Z@0+|>&uCs25GMZ`LI
zY0T5g&nTfHZaBDI9#{~Y`pad5ciwyV?NWa!u$NHOY?e)rs?_l)XpJgiC}$b|vB&O8
z)H2A+qQ!e^p#~10eFyxB3C+pMgsrQ7Vmv(reci^*d{Gub<8cjBPGr#4!vgq@Mq*8F
zRd8*nr}nwtGGJY3MuWZY$FALkRj1-_9-AUE
ziBC5n<^7B8$Z*t2e`aXo-?%_NIKh0;3WQc@Q~Z+%9-gjXTC5=QRpIfcufefB=Vfse
zFidUfH&jyMtGd^H~AJl8Y1KszBoYlm4nKM|41WsuWjxKysaMk=hKrRN=
zFl=TosO6=Z&V3!94@cFy40+rFXk(nGgb^aQXR9vC7YJlz-^J77!bLUU+qfC@>)MC$
z^b#p)sjw+xyDa7eGITet_rDVl&8+G2y&
zhJW-0q(Ab7rr7u|3+2;Nv{U#u0v{e#?h4wjrT}Grf`4Yn&$$bi4=gp)Dv`!L@FW>2
zzG~C*?;tztt}-=rM7g{tFA9Gd8z^^WN(@1j-2(lwfD}hVKs}A}cuz?my-)P6zJu>p
zJpQTxa#b*(!#HdO6FRikh>p2*S;JGsxR7)WHb)+k#Zj!OZ5kn2lVy6o%pNkki_+NF
zNL-nnkxGwB5zw@_vvjIl(N+D}sxQJVZBb@%niSj1QK+$}sCCfP?u9GJMV<+_!$lzD
zpY3NN3x6rLOWrG&C9s0P72@DOmU&L#`lZ{jZ{tZTq?Me7;=M-ysjr;K2N|8#L{?N;z!W9ru)l2EL4CrPx2R(W!Ug%ejMqRzQz>Qtb?>Sjg)MDV93md+}Y_C#Dnd
zzX^EZsdtmK`>u*fVEC+FuwveBNV}bB=I_CJ;!)&E_sR{L-mx&gO7u{y`aB%^kM~pF
z5ghw;0IdCHuY3&oTiD@TfVNZbA~9R;;YHB4t+4h}L<_X_X$2jc@Ij?`-5NX(Pa58M
zXa~#cUE2#r1rB9Ay+l?{HmupbK^8+Z;S?>R1n^mW5AEiO!k)rEnysZ78=CC01hg8W
zq{$LTyk1<;D>a04f6UISvkNMsS@jE^RlxHSeuoSzCzYQXI#U#-8!tQs
z@z8cz4vj8;l$oWW%QT=Ujlimu1|P&6d=S6DGXL9D?%a&qka%b{cK+4gIFjGA?;}9R
zdFN*rjRw~({t|{ydJ?xiq32jX44?ETShHXr&|pP?H?f{yV|{Z4o`D(`sO%Q_yIjDx
z2K1I$O6aWod|Xt)JG%Ai)P_d}1u2m)3%PRh7OV)}BePE-W4+8QXL$Hl
z>WVB4PP6+oC|dT(Zu2#&=g}P(q31}}2Qgo_2B}ro6=;vygJ?$+#K&BN=v{Hpb4Xhg
zOUbW$9Tyn#NDtV^565VS!pGrXh!Q1+5ZMB6BZGB#rw5Lg)99u{PewrMU2!L(r$
z!2u^FRYFMEM!1uq2`T>=`yyqY{Vu6oDDSU$o&IbF*oUIkZ|0ZT9#?WN{Gacjfp>l7
zI?u$%!JPkjksZ|3y`>*?@$INo*TJ}>g{#jcq2HlB`NJV4D2}wzc;dT}e1$YsUBX^Qc%2TCt
z-paL*ivt=7{s$ZNik=023(IsvESIHyV8!dD#c75;d>M_Oion^`5ym_fP{dG-rKe=V
zu4PALsb`R}QD)w!$$QOU@{6S#VAAY9)klQTr6&t#Cq3V%H%1(xg#HIb%Y
z>g2985f$}|?O`_R{z%kMC)Gu5h!a^>;YYS#fZItKEHBOQTUZp#(&!=Rq`2%%jg$6$
zO9MA-AF1cY2C+Q`wSiV$8kedFwBUU3Bi&irn+bQ|%z;G4(@S_a_JB@7d|-X{@MVaJ
zx};aMCV$<=wW0qwO$eYr+N!@MvBA)5fxn;t_Ti>ZWDVX7>eT@>Lw)(yRVHWrz|m0K
zy%yu?C3ZxH!nNe&{$Dem
zdm4`;ukBV`Sgk(QpRZbn3nMk1+<#l@%%
z$x=E@VcJmMCsCnj=~m7B2u)U4J^QVqOb?dv;eq`a%~V`00N$q-C}V;sr+3F5(6)JN
zk);e@x@9Hq^_tJ*O$ivwXca_A$N#s`0my&6KFwmp>TAVJCLxPir)gR;y$cz-mrF><1k
zX!g-ADd9tK?L(|18x~&J{jE)^?c&)48u*Q1vg1x2hM1%K8BY&Uzj1TuGl`>1O2DTu!df0s-0b*}ah;I?h92yJ_JUxpT3b8NO
ztiz*HY0{3td)Sb(wufD<7=mW1*n+*#B_}N{dVXN(Xw%5um{$af(WmWXZ3XuV?!o#W
zH0>#kXudB-=iA6BnQtlqy-R8xr095qeD)61X}?<6Qd`csKl=}EjdURcN{
z#e%l<@r6E}dx3gQDG)uTb
zyf=3)ABU9q!y-!=-eMqnIwhC#+J&DhlTU&sJ@J~Q2l1~;l^DVd3#hdKb?h8Pk+hD2
zdM~zsZw?-3V(l;#;Drh5-OHpTB|QTV)7!@MA#oZ1ewCROO=EjVgNZ#oHa}^K1z@u)
zsWYuBYCF$k=EmvS){G(5`tK#?wp0Ts#ZWXO78Y
zR^z*;e}R@P)%Ns@sKQNDL!JtJ?nRaoJR8sRSh;g8lSPBf;7)4igR}5_lI_RBm>wjk
zFRXhPLSt~j&H5iCDI&rQ7sGxq<0=u4hHas48$KHmjq58e#`4aN9-Y2zI)fj&HYFI3
zFGDAZXOM;kD%=A2TsFW${7EcGOV8RcvW>6EQidN*x(SC8De|!h#`o(Bts1Kh1u#kf
zcKs&Yqnyt~;Wb(~oNG!`$$n|}iWPX*!s9^eB+3G^r=ljFH!^W<|FVcEbJ`F*V?lx(
zq`#1OCX!~|*&^8~uV_6Ero^TBoh+SgK^~j%<)=!MwM=P(XYdFl^3qtSdzoYwm)OER
zbHKE(6_?)NKiC4;=8+9w7gM@Axxn!5!6HiuPVw{ir1he+Mom{DTNJWrlg3j)mvzsC
zi#o~!$9v<1dR`E9dDZfSVSNUPEM@qHJzF3(ozHiS_aCWzyI(p@{cd)qT<%ZsJ47BO
zeelkE@4jt}Dg+y%RjEtAMPL;!tVq8aBeIm?zCm*|veqyDO2gB`nE2SMa&{VDzbP{f
z+OR++umA9BNE_sf&Ui|Uo(>6uyZ-G?4sU)Iz$H9e@NwP>fwN1mso`V-A
zME`LSG-&Pxfn#`Yz36J`Fu#5+aazAAOH7AXkgG=}v=3+k4O@CLk+g^I?d$1`r-JA^
zvI8_}Q%@H44BC)QvC9bzx;CxN4)!^O-(5xIxk5H8!cT4WVI1H-!oGoD_8MwJ`2C*F|{
zd4XQpl;0RoXst-D3I_3eey+FXH#Ajo`vS1k))8JUdqbUO-B{*`?W^H>;zh>OL-_R$
zg0|XVz*+b~U=5lc9Y$Wm0(vcQzmxa8D5sH!8%${5N@OX+&)rUi-4`hosR#yj?+V>o
zw=7~P#J=B%(@`0kj^&5=(Pb_Hg(_8v`TzwnmkMofL{spkf!&Kwuw*sxpanVv!IrS*
zt39d&rn4Gw;){;ID^xZ3?;@?CkY_CTVLZ5$M1UI{ZT=(71cx4DNx>%gu-N-|thY
zrcf5$I}3|qC_ZGaa%B&JKy35qA)^TuGdN+#bu11pRKv!@+aVi|eHMWhR$v)KmTsID
zR!~P-f)6FS(vxTxG7NJiIXgNjKh(oyh#gU(ka**&i7fmb*?lm$IlD5RUSj{5LqM@`
z63|J>84}MbQh%#ymD7;++mHY`w%7I+zTv5wvIy}N(|#>?0OzK%MkIbrD4DHCj=3No{E;G12_!BqSHQFAjSv$TEfF;mLoI92X
zCn7G$Qr2KSG(CCjGf-!kHIAP3-642dcR9aRbinA28m~;``dG+J;~hJzgbSIv4sHS7
zKJ3*g+*g-Q|5FXYQ@eqecH6F3@7q$fy6P@(GFGsoSjnMXgP@ixzn9{5Yw*1MJlGPw
zn`Nzsd(aQ-Gnn!85-Dk^uqk4@EQV$jSOkcippw#Kuw3lkjI+U^9;Ui4*|Nff7XCWm
z_MKyTs~=ddJGhnUK_UNB8!M#CiCU57u
zMUa}zKg29S8Hql@W1&&gmU6D=@FUerMz*QEy#5fo@vob|Om|o|JwU
zHk{?1&hBkmK~NWcAuq)h@aHYt;MScq`SQoAW`$(IHlY8FGT9bYJAXa?KsCD;N!|&Z
z^ph7GJWxm(6Ik$0kc5wm5knp^7P>R>`Tlr%sC_ZX%f_Y|eR&~5ZgxH#SQlG1K@Uhx
z$7}bW#`->mr~YkT5cPnZWxwp;Xb)rO1jrfbJMx6gEH&-_=K#DBHjD)*bxA5R;aj3|
z+Dn#JYO97_7y^}gmj~`oA2taC_tiY}rt;E}&^fs3Sb`6*%`2sEQ_5MfU*$ferYy$#6
zIP?`-DT<~;7|0t@9MJW1(R?XGuc7Urm0#oHvgqbtzjPb+Z#pf@3B&U5mzg-AbwVBs
zZ*Z@JD`oKz4V5rxdUwsC9Q#(q=sH=@Qu%t1Y7HLVF6o$<{
zzl;wr{|d<|4BD`O4K0ArpQdBy;p7N2yR^57)>I5dT7EK?$!_h_7{>JVXFOHJuiJOR
zwcE57jscy*mdZGlpZH%aA{&L#c2LEEC2s~D^{mBAtNYYS=nj4H%+jNY$VaGogZ{+#Ru-H>alR+ZyHe(a8Fs)upZY`$a@Q;4&{6P8iR{s
z?i_wVDVCVU39Navako8Ny0t$sv1tX?B4Q?K_WpRevTDFKm
zmx~3k@urU5pG9S^4)*Xw_l_b<8J?M!55FFbVe%Szd%^TUwwXm=x+4@W-_V3ieu-_%
zEmnxCL;RmyR6257hFuF!Xm*F1_Ub@6nNoVgkNY7vn-3IG0qw43sng?dQ1b_K$tBbZ>n<{hA|Pb}z6@OA)`qy}WFZr3ANgaE4}mXE0f`U^{%va>mm`xVU-1(23;rXV3zvsf=R~h6T#U
z0<%y}Yf;tAE*-$r#aU!2!+$$*7E-eKyE}LM7(BYxUYQP)paR@d@ryOq?o>InI5^Qby$Du)84kaf@zlbi+{I&)
zUg!_BAYFpDB-$Va3*aOQO?8hGSkT3{Bee5j&7gSMYJ9EaoghKB2;*70l`XX*H8cx4
zpODH1QUCqe7pkFuJcrlmIHoQ_}Fgak1s9p)a?MNtOR?6a
z9yotBNtXRBwkbDcW)-9VPcCnm;IW{yDX@+#$)j%!Vzg>v>%xOzVH;J7>&}&R;bTW%
z>>eV22rj6u=Tk>yDaUEc+Mr1^nO_97MeliOL3`0v>oE5^&d>`NZi|q3{uUhA!o76j
zb!+j)E$adujwiujw6Se^V3MLLO8yPY&H7za0>&z@%GKM~AS&(%lk4BPC$w$OpADA4
z3S$59D2Pj-y&M?)jPD+slx0^dFEJO3R$DCIqa&u+BsBJRS_*90zf)8NzV(|z*A|@^
zPcKn`eaFqwTo1~mZ_r}BvMuFj54NQQ1`obK8{84?5Fh>%&i<(Dk?*2JU=7txEsenDdaB
z#=Y^+j2(&l!mHOuvreC{)_5vt29JePs;XOg%E698DC*lQ>O2v432t6agWE~zaBSB_
z@lxS#Pu&Kp?Zk)*3UIB!h9CBeEW%Upk37wm;BApv6VF1KXye29O!_9_p?dP{e&|v@
z$Tg+a^BX*)v*z%=74vp$26}pQnDS13X`l%efB!5J2L6WyaG=!`u}4LFv~JX*&|4w$
zRNx_zTQv83=E!L(L^BzSr85@rB6WwrcIdU3)636#
zX(i;ONpspqRs<(U2Y6IFoQ$R7d!a*p?*9%nGADhhSNhoDuMzSO=^l
zgf2M&yBD9-tU{jt=RwMk@D+83uRRVo6E!<)Sq$#RUIeYA=`ZH*ho|#3Zs9GmY=--^
zstf+Z+cWv+4ktlkY%=5NC2H4qgRX;pnV1VFli>K?3yi0NXw%KdQmor#v~?R5@~fE)
z`rfh+y)E1ocQVr;^yEHfy4HAjRqu|>Q^&4bLlkah6rKaBv*
zFlERDaMFf%n6F!l+|9~_wbHC{gF5vfpnWfEMHZWHeUzrpnuCeIEjEMB3z{CLPI=We
zI}O3v5bt8x_kcZq<@I&0-2fVP9nIv%$DV)_hr$_857DSuEAS8EI#8bByVhe=BX6lK
zfSnEM)w9-fe1}%hybi^*s)rrtFF{-~&0-h9W5WhReNXjqFVwmIWW|~yMSozll=wrL
zSygGQ<0dToVX^#2cU*+%t>;Bn;W66&!*~apCD2KbtwX+zkfl&N&Ei&t1b-9oLUbZ7
z5-3lY`m%P4KWynK8BaUt@SNPL$C};i
zAtRH!&nNU533LLYN;q*T1}z~4oA)
zO-(a{pYONVjENmOit`*HGJ6hbYts|?L|rDlP4_0ysjshzEc`tgehHEj(-}`MQNMX@
z=n&Y1i6IZg*~1Bpr-JA)xGl8o+(;JnB-&iVloLHfG<|Sg#QIL|i!m>Xx8TjexSB1|
zyG1oPaZ(S4_-hBUEKwPWt2dJ%ESiNvl7!v(?j0zjs0vSx_h2{{qMKNdG5v>wS1nJG
zr3BB*&x7UL`Giw#*IF>7i)K)+Tc3p!?`Q^mo^HV<2;jZe5)xMHOZVQKfp0-sVgcSq
zdlZN^xfuSVNGikK>a~D+9fFy>;|Iea_T(YP(?hiG*d03e)XdaP!WXTB9=S$d!vZB)
z0H3b_yv|zE;ca}OZC&*ZIh7;o$~B0-$|q+g4G4r54I7kHOslVbvSPKIm)eXEWM(y>
zsiMVBvp4efhQKjuS0BnYqr(^^AX|Sb@l519h|oCJ43<0x=8HB&CP@oA?%`HVdsduQ
zu5r+8ZD`?|uQVM(C3+`3ulbfZq2QO52(RKou+6K5RhTPQ!`s6Pf;#mTSxWGo$HO5l
ziw{&E88I2?pbC|+n6@v%T`(l5svcS9S;PAU(-a@-VY1Wai0yDIm4`cy=`|F$B;*Ip
zn9^JOo0yE>z4OQA?3-}#r+(`OL5td_Ob}e%P`nZ6@j+XK^)fGaPPuppQZF5WI?cN)
zuWpOrXW_=_T}+y$UA}vkwdnHebiTXZ6T6@){N>!2Y6}CwrLpDmCX70lxzshn=5GdAtjF
zc)6HZx__ItuM4fZHv~GV&;*gV6;#668-<~1CZ3Al0TcXS=|^~!eEe-u+>2=Y_wkFEB5jF$6^{=;KqQW
zY*zg@CZ`XRd8X@P*bmVC@%d$wQi{~IoA`lei{6lO@t`?%WatJ+jE@B$-?nC+9;92}
z!O*f@7r1bib-EtMm+YVT5xA;HwvpGcfNTMLuE@naQsf8uHUYl|bw!pk{LIbU5O(o0
zlQ*>nAEUcrTDZKYp$*+WRmSCCQ#lNO<+3qCY1^O#z_^23t>t%i(4oI$F
zVx3<(Nx9PZFm!C^ug78HYc%#z2eht298h4p4r{;O1Fy`RqD~1)qOG*X%?X6>|7(LN
zmuBYgxL(8W;~%jA&mdjgJreU(OB~xj^4h#2R?fr4-6o+g2hJXbYbiHGRk%K29Jsr<
zF`iyx#g4U*qdlef1ANXX)TXKdg@2p-c2xKoBd6M=4&gYFODEuoFuZ#((DR@Y(lXOE
zi}g`Hb@)P6>airt2!*m_m121Kz8u3#Y?yxYY#3x**G!NVBcW{U76YGV8Gc81ezo&x{Fs)hH64zREV)NMNeYBlc2M2Bwr4U(@XGM*kHaOgy6;G>znor^D9
zmG}tjsk@Q4{49XaQ+w1lS=89QQGM`l(oke6!!O@ShmElpnY9&c@_Vb
z>y=%a{Pv|uWTYvw0KPc8v7y+H99n2v2ua5_LaU)Kfr}@#ooa#3t~kEzFJ_?yxO>2+
z<=??$FaOKT(}UEcQSywr@Z(3+FE9hSt)IichoM7cu5@`IGvB-Uh$$AJJoHsk9dBc~
zn0NH$zTItB)E^F?gaTr^wHW{C
z+p-h?G`$bx*#f9HPQ52OwtP_u3WnpgYxrmGNVt1CL(h?|OUP0_mu_E8hm4eL=rg`8
zbQ_|XuL_mAPBg55(yX9lYb=kJnq8h@<#rA{Dtn4(}#{O)4&Z2l+^u5BmV8|nY4M;*
zIZ-8~=U|yw2Z{z@s?Up{dY`*|0isUvxusrhxJVRv`22<0~5V>5_vifwh>5O-WukxC(O8Z>`SgfL-Sj|Cx{lUwf^Gtt;hAImnnV
zGb@f>{QUC~Ws3#CaUVUXFNtOLaC3*T1Jn=vNXM@mf79Hn$$iI&>XqO}&YytGx30>v
ze#GY_S!UXde&@Y+-!?`VR+ty>61~nJiCORE=fLFyOGK73+^6RxaPp|feJxj>Mo0W>;4-k^9|atK&dQ%&kv1M|0E`E=~){_wDA>L%5WN4EIt;)44Z=?CXX4uXwt*fU
zx-p(!V*l|dh)Xyxi}@MfD~c_wYCyv;=Fgcjcs#f`xhSv4)tlE~_rY)`w{7EA(7s6<
z##2G8i`puSDL}^OGSil{F`%&Cx)elwNz9(Ge+lGgtN%dNw0DAL0W(Eaa3|#!>{?Gn
zPy-ZCHDrRuL(vMQPg0}7kn)gP;0>%dYpLh<>1q@A0oixS@fe^OC=p)7qqH6E?M0Rn
z{7k}S*b;R}mKBB{**!ATpeuz1$ZcT;yic%i^%*l=-H|@y+JIXfXI9OSZzCW#Tm53&
z$u9<3Iy#zlvROf@5rG4zfmj_DogdhtH#G91xfQjr0S~C4Bb!B-IZFMiV|n``)JL7w
zPei>{2!Bm1tAo7*Oc|_pt1OdzI-sHeW8_7c8c5-QYT?q&D{wqst
z3@A#rPqA0=M$udRshEHU6ITwd5LwFbrZ}LmclKcN=tR5%Tsfr*#?E*F>>XIB3Wte3#07@MjDz^xe(xG5bwX#HGCpfI^{EgT0eGH1X%N5I3$~f_*#I
zC{yKKzcKV5K3SI63tz&)GSi?93zWhFuV5NBBA$y!X*=M_S+;=Hr_aIF^b98YnK2{5
z&6&^8vIJHV=dUKgx`+dE?nwN|?vR-VU8yYag;<)L%se=_>72-_)_B0AcHrdd!0Nc>
zt36PFr}3$VDKBa|wzP`*LuQCVBX+q~0Qn&hOc(3$$$5TzN6K9}R=_EZ*KdYJ*kB
z3pXyaRQ7W3tXe9B6+H5un*r|^%os7nOshm_yDU23Lxd>-V=Q^`#3W_LZ)c>!p3{+X
zzJvHJD)&Wxw)wKlRjZ~n-mpxp8e6eMtG2HmS#OgX`{K%N3WXA3?XRC(isu%SlnI~s
z_-jz3rslCQJQiiLqLCOIYB#jPi=rj!y1KyBPHjb&GMwgSems1F$#ZdXf;l*#umya)
ze6?oJTGaEuw$NqBRBC~RDDAo^ao?&KxL2T=-4l~k8{E~y3H--$&%yPC6o^>Q-yIbd
z?iA1h+V!N}-xxfKZG-yWHB(Wc1g%V`fs+}g1dcW1s*roQJ@3Xt>EE97T(NV;B0RvvXtT5;^HAL
zIhDzKeE49fTe}wHsUYI7U5Axnnh!3;2W(Oy-;iND3t*>+T*>5RHbctwyKp)p!Aw_o
zg!Y=XcjWKkiqFFEo27n$HF0i#*_`Amc}-?k2O7VTt*{3#h
zsVq5A)k@-z6J0DbsJIkfy%ml1+Je`*U1-+-6CiXdK37^
zVC}-s#U#4%$xmAJQ2#}@U_-z|VFCQGl1q(s8tOPZ!K^OrZC3y95AoSndpkRL0T*f8
z0_Lq;i?c(T34{N|wm^N4%oY_eWYQ!Mi&5RXGUlPNiiejA^cdcf)%SAj4LH0rmhn^&
zeMfeHCT;l6g(&wKY$MoYg9=@hKnAU&U?1n{fqVi`RTen_{Iw{cI#SMma4HDHW!FL_sqX~gS!F14=#(kkg
z;RA#i1BwS2sFasK!F0J|+KZvz0mdbXEG4*$R}*N^X&93gb96tPIj)JPV`<7lwCU6n
z+IQ8s>89aJ*}5!pjIv;kR93a@NzxSSvFPx^h|oKeqI+EEh``f8qvQK66WCRckzWW@o5r$D(v#
z`DeS!UT3$o)i`YTg;7HXO?^OaI*D(>bN%F0ZpfG`GpjO<-^qqp{@G$3eCs!b9<95G
zEG77fOEGZa+9f7yRNzqM3yxRpSPMD1Yz$61KdhpYC@*2zT|}eJ9{q^&bq*wd+ksth
zou4cWqk9YiXGdpIy>6W5vex5HU=pMu{jZKThsV2hEIyOYE8iYG
z&Lm(N;FZY}7*7@P`Kl1e&DTr>y^C#t`qi$g3T?=-WPwj*=Dt;DWLCAMy&8Ey=b+}S
zezDP4;M9S5##2EAPwfU?4cMLyB=%K&PibILU4Ys=)?=JZ3v>B03ng`3bAU%#QhWD^
z;swW+)&^kVui)P@C6y>gA!r|0RL2Ie^*!ss;NI%J2h_=5vUvro9eI*Q^&G-@Du~Fq
zXh=%AE{pmW%Wy|#mKSXdDAZZ{0~EzN6&J&PP$ylGRk9e
za7{(CRvnlQB%Z>TlV)NJFf34<1!m%f7UIQOT|0oM3;WiJmv!MWH*dq9c>2y2!SG%^
zphNSfMGQsQjg(YaxP6x_?Hn@r7er2B(3Q>t_|ZLrg$fr7c04p5?%d22S=AaJG|Nw(
zmx5JaGLtB&zk{PajC&?fmgS9%CuL?;r16{Rh2?%mEPI1G^&qf)Z;_=0Prx$?j$Sy)
zWcBaX7g{vtgF}|U3SwQ@Ce3;P6!S5@H@Q}btLQ9$!UC{xyGL-BOVw)|sFD%KBH=<}
zJfUV7ieXQ6+-jR?Rmi%?t+FVZ>H1P;mM3itC=^*6U$nC*YKY>HJ
zrveCO@;rxlKjHiMa@t8lj)zVBbyn#&yc{i&%&_|QGV@
z0G+5lzV`^mQ$d_dx(G2>Vr5aw@mrK2Gs~5J=e>8|Hbxj~P5!Da&TQsW|sRdu6LcSrRLM#x1TF;A`BiF^^anlda
zATfBRpIH4ue9inl8lGMT?Q2UT3kk@zIZfhKDH6Yh)VQ%(w>oaMV340?A&KsKC8ww0
z!2}1GoDMD8LD#n0d&m-%k=PL#3fGb~u{HCt$C^<_QI#S2L@Y#Wu^cw4Nw9eOS1^t7Fe;(J_7
zQ6TMi_{MY^)2N59)^32i+1aw-f8#r^@mt7=47ze!0Ph{etg@Ja%=8>Mx&!@=4+*sB
zQXhQ%c!y`-##3{J1VcwhC|yl
z2NTR5Jzcq;rCV3ZGINkIUuKpg{qFIjAF7B2ctb2R#XwCRVjGwCg7u3ZEVO^bEV0iank);IB%FPBkPFsY=^%}y!
z9{oj@ZhZgoD2Pj74m^_iGj>+5=@wqj0`UI$qsl;0?GXQg(7>Ayrp4>l;+K*x!#-SG
zAwu_-el|H)eO=TxoK!0iRT2J)%%}+37*Hs-I=*;2P$ctFCLP-VS*ceQYocACYC>~7
za$AI)>@3*4;s=qX3wLsMfe}-ll4VdN?kt&U&=0i*{w~&@rXQc}MSTU^z)#VqAUmHo
zs;^I-1T}3i@%eQKo@R9Su3VA-;JdDIU&x6Jx^i0p?_oNYkj1FpyqpGcht#*%mDLjM
z`!<2bZD`SP5$s%k1a4hr8;g
z4;CNe+nxE;^@d3U*kqIv#hKYT@WVz;?CNzCu5Q#`#*&VECINdar8*;^jFOGyXo1i1
zkBw9xrql8037{jlMJUKCfb~D@7g00QQIK|9{bN4)rGZS(eS&q)EhVUAJX*xn
zQ7jYfBU;Oq+fcl2ExzY;B-~6(X4$6>od7OQ&Wxv*ShQ)GB@rK6VpahjJx_LGIcJQt
zji=(bEvq0;I>d+$D^U&iaLlkddZ#RwI+Y7$W<}A)fWiirzdu@JF2jPnY`A=2sSUIN
zH1&T399-)#+3Ru9c77i3bg=|wBqq*&1!(cM1b>%kgM7#=fHOf9AU#5?_CVhz;Mbt8
z$Wn&q78JlQNB9mKo*wQnd)OdlI!uDHa&mwcX-hy2_%{;IAgjIw=809HNQQ`2+#@Dl
zuWoovJo-9lI`>j!XXe7@Um}%PwM{o4=p5KWmOB~Sk~T8464NZShW3x8McTH2^-)`y
zSV#1POd2$f@$?c`Zze(L;k~k$-Po65q1?m+^Ry%V?_4#G%4!C(oGh_WJHs%`lg;;+BuX+~1
zdrGU54~iv^-gX}D-l0u!9uDX^suk4o<{_`^f7l1vchyZ&nJh_bEF_?Q$(s_dQj+-1
zqqrm1Qw{1A0QG8nD=+WSizng2^-E0dnEu1T8zTr*!s5-#;hwas9?JhvrH*ox^&;wW
z6ZMg&0`Hs1Qi8AA6#|*knwQCa$KV=2_1*0C2e-*(rpD%7jmR{1rq#^yK#52h1
zV}Z}bs^sMqK-7jCRM-^3*K43qYG}9U7GBN*@cD5<
zl`n{Dy=r;Fus(xCmJ&P{qd`{fT*qY5IVHiJ0~t>Rar$yB#9lowi&}#H%p{pvk+dPcN}z$6CnA&5^}?
zi0vNV+DJ}OIqAo-SZsD{`tV6W;bW>{!ya6BcZYv3#`%u`2W`l>Zq@6eEROR&(=AxB
z0QIXI6&J87i^KQvE;c~qo-!|H%}KisNk`U$PcOAI4rC|apr_$-)N-Jmr%bTs*H6Je
zc#MhE{YxH;Bl~xVa>%FnXZ)iV*?@<{0{C1v!>Y~@tJ|@jHw59j3R}SshqVsqtDKmg^h5
z{;nxaT5=}rTYCmN^;6r4Gs(C1H{*6TG;Hl@>0mi)MHc3Ba&>@!3GHClqM~;#IVlZ7
zR~?0+kM`6hTh82nydNz2a3@@gXPvSa@E*{%&3qO?&RlWy-(}Ud@>HxjunB(0ecJBvm{dz;}iGyPP8Tdl~1E2CsBGbUD
zwFTbB+Ob~Yi@2DAnsr-1M8Z`#nsk$io)$a+n%1w!cq)kHyTc*j2G@mhX=R*-sY2hw
zBm>)t+E`x->xCcL2AJnbF<&dMIzR_j*b2Uh;=5^G=ILhxa*aP|NFh)VeMy;xYCwSYwIneYb__}
zHJW#r$*pi;bBtcmn*2j&^u$%-ZZiK3e7~J#W+kBihGk&=rzrtrZBi^tM<}aySI|Ki
z%eJkS%Z~wY6?ZU?T$nP_)V7#`P70hhbfTG7i7eT?f{Am$eg4PxriJ`!AtU=P>^>18
z3q6Sp&59T~QAN<~<*0}PJR~|l#Co#P|Ao+R0c;Sn=vKca)NMC_nf2&_U2x$H?~t(s
zWh8nJ91Trcw3Bnq!H)pN85mIgE$}8*(M7E0^Zs3IaVgDr5sllc6DQ`$U*W|f$vah^n1S~52)A5MiQC@zN6N>%NM425gS*JYXW@g8T$%*syV{ppEi3>M2dsMi4Ssoz*+DZwL89)h!%
z;+U-9&i%m4y$<84AhtyBf^0leTLL=qPVI{*DX^C0ErzvHjC>_p0Pp=3Ot4f;lzue^
zlFuIyS<3L1gPzmK3R(1phG#7UUtSnD^F=v3`TaCZIgvqEZ42PN_re;m2dpt37%{$`
z+7JxUN?%1DlR}GeDd&{&ViF}dwb2&56L~1SQo5RzRJweHJc8=f$E%gS4vq0S?pItk
zd=6-7Y^9t5v{dG>Wv48$b@6J~cY|(2S}++GPbI;z@biqPf*AXB064p8R?+?y?@?vy
zpS*>2W#OK-@=k0yvP*Mp@}ncBD4(!dvW3sGWBn*3aK4IrS|%A|LqGUcZ7cvycrkc%
z_!LH~A{I)g22$T>e4i1j#GB=iH)%`sZZmx!xq-i|UnwW3NSZ=e3@9H9U@Q$hhJWS;
zGd}!B@NTU>5}6=tNHJJr66t2ro_wl|>U*U`mYld^omc
zBax<2b1eQp
zy^c%aYVZv|?iWST3qBtIG_ikhRgwhsZ
zO}y26MZ!<*2U=qz!54UsH0}#2O@)!hfTEmCj}O5aeDK$yx``&0$>;XNoy*671^}v|
zMbI2L690l(>^*F#)-Rb4&%X7snWqP#qqav+e-;*g@&80IG?PsY@8=@Zz^jJ^@cDJa
znlO)4T~`;F+NrJ0D)`O8<4_=ZdS9Jleub3kEyh#vzEUL*sW5zYXt#(SV)X}4?+$cu
zfvsT42Rlj<5pe;|97=%py=VtGbwK;eZvHh23i#f&w0@z&2NYO_*YJ;pdrs~<*3#ikx(yO`
zQrn4~I%EPkIXdZDVf-3ludM5IPoqM
z6@6n9Wr~EF8ze>Y3^65J;63?r{GOj>mJ+&NyCCpr)Sk%--LMo=ZmFAq7&hriaPz3m
zbRh92e()S*ra@N=3(yF)u2_>fz5Kx0-cDpG!*69~!J3$JOkOA7<}kEpH^x&zY>7Gy
zN6%f5MJ>io%Q@=TBUPvxkcvH;@SG
zjLa>Ktjd;#CaSPZo5XURjZB2A=QJ~$ViK$HLFloNVa@zKGD|mIw{dNt{bnWb$LAzS
z;*~3j`fH&=_MCWoctKDn_0KNtCAOtPEoOLq6Y-|qf*-`m}6dT%B%#@>5Z#11O